thbndi commited on
Commit
d198eb0
1 Parent(s): 8ae3dd5

Upload data_generation_modify.py

Browse files
Files changed (1) hide show
  1. data_generation_modify.py +483 -0
data_generation_modify.py ADDED
@@ -0,0 +1,483 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np
import pandas as pd
from tqdm import tqdm
from datetime import datetime
import pickle
import datetime
import os
import sys
from pathlib import Path

# NOTE(review): `import datetime` shadows the earlier `from datetime import
# datetime`, so from here on the name `datetime` is the *module*. Kept both
# imports (other parts of the project may rely on either) — confirm before
# removing one.
# BUG FIX: the original appended dirname + './../..', producing a path such as
# "/pkg/dir./../.." whose "dir." component does not exist, so the sys.path
# entry could never resolve. Join the components properly instead.
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..'))
# exist_ok avoids the check-then-create race of the original exists() guard.
os.makedirs("./data/dict", exist_ok=True)
class Generator():
    """Builds bucketed time-series data dictionaries for one cohort.

    Reads the cohort admissions file plus the enabled feature tables
    (diagnoses, procedures, medications, labs), trims every stay to a
    fixed-length observation window for the chosen task (mortality,
    readmission, or length-of-stay), aggregates events into time buckets,
    and pickles the resulting dictionaries under ./data/dict.
    """
    def __init__(self,cohort_output,if_mort,if_admn,if_los,feat_cond,feat_lab,feat_proc,feat_med,impute,include_time=24,bucket=1,predW=0):
        """Run the whole generation pipeline at construction time.

        cohort_output: basename of ./data/cohort/<name>.csv.gz to read.
        if_mort / if_admn / if_los: mutually exclusive task selectors
            (checked in that priority order below).
        feat_cond, feat_lab, feat_proc, feat_med: booleans enabling each
            feature table.
        impute: lab-value imputation mode ('Mean' / 'Median'; anything else
            falls through to zero-fill in create_Dict).
        include_time: observation window length in hours.
        bucket: bucket width in hours for time aggregation.
        predW: prediction gap in hours (mortality task only).
        """
        self.impute=impute
        self.feat_cond,self.feat_proc,self.feat_med,self.feat_lab = feat_cond,feat_proc,feat_med,feat_lab
        self.cohort_output=cohort_output

        # Cohort admissions first: every feature loader filters on self.data.
        self.data = self.generate_adm()
        print("[ READ COHORT ]")
        self.generate_feat()
        print("[ READ ALL FEATURES ]")
        # Task selection: only the first matching task is applied.
        if if_mort:
            print(predW)
            self.mortality_length(include_time,predW)
            print("[ PROCESSED TIME SERIES TO EQUAL LENGTH ]")
        elif if_admn:
            self.readmission_length(include_time)
            print("[ PROCESSED TIME SERIES TO EQUAL LENGTH ]")
        elif if_los:
            self.los_length(include_time)
            print("[ PROCESSED TIME SERIES TO EQUAL LENGTH ]")
        # smooth_meds also buckets procs/labs and ends by calling create_Dict,
        # which performs the actual saving announced below.
        self.smooth_meds(bucket)

        #if(self.feat_lab):
        #    print("[ ======READING LABS ]")
        #    nhid=len(self.hids)
        #    for n in range(0,nhids,10000):
        #        self.generate_labs(self.hids[n,n+10000])
        print("[ SUCCESSFULLY SAVED DATA DICTIONARIES ]")
def generate_feat(self):
    """Load every feature table whose flag was enabled at construction."""
    steps = (
        (self.feat_cond, "[ ======READING DIAGNOSIS ]", 'generate_cond'),
        (self.feat_proc, "[ ======READING PROCEDURES ]", 'generate_proc'),
        (self.feat_med, "[ ======READING MEDICATIONS ]", 'generate_meds'),
        (self.feat_lab, "[ ======READING LABS ]", 'generate_labs'),
    )
    # getattr keeps the lookup lazy: a disabled feature's loader is never
    # touched, exactly as in the original if-chain.
    for enabled, banner, loader in steps:
        if enabled:
            print(banner)
            getattr(self, loader)()
def generate_adm(self):
    """Read the cohort admissions file and derive an integer length-of-stay.

    Returns the cohort DataFrame with parsed admit/discharge timestamps,
    `los` in whole hours (floored), stays of zero or negative length
    dropped, and `Age` cast to int.
    """
    data = pd.read_csv(f"./data/cohort/{self.cohort_output}.csv.gz",
                       compression='gzip', header=0, index_col=None)
    data['admittime'] = pd.to_datetime(data['admittime'])
    data['dischtime'] = pd.to_datetime(data['dischtime'])
    # The original round-tripped the timedelta through its string form and
    # split on ' ' / ':' — fragile, and both the positional `n` argument of
    # str.split and `pd.to_timedelta(..., unit=)` on timedelta input were
    # removed in pandas 2.0. Flooring total seconds yields the identical
    # days*24 + hours value directly.
    data['los'] = ((data['dischtime'] - data['admittime'])
                   .dt.total_seconds() // 3600).astype(int)
    data = data[data['los'] > 0]
    data['Age'] = data['Age'].astype(int)
    return data
def generate_cond(self):
    """Load preprocessed diagnoses restricted to the cohort admissions.

    Stores the table as self.cond and the maximum number of diagnosis rows
    per admission as self.cond_per_adm.
    """
    diagnoses = pd.read_csv("./data/features/preproc_diag.csv.gz",
                            compression='gzip', header=0, index_col=None)
    diagnoses = diagnoses[diagnoses['hadm_id'].isin(self.data['hadm_id'])]
    self.cond = diagnoses
    self.cond_per_adm = diagnoses.groupby('hadm_id').size().max()
def generate_proc(self):
    """Load preprocessed procedures and compute per-event start offsets.

    `start_time` is whole hours (floored) from admission; events before
    admission or at/after discharge are dropped. Result stored in self.proc
    (with the admission's `los` merged in).
    """
    proc = pd.read_csv("./data/features/preproc_proc.csv.gz",
                       compression='gzip', header=0, index_col=None)
    proc = proc[proc['hadm_id'].isin(self.data['hadm_id'])]
    # Replaces the day/hour string splitting (positional `n` of str.split
    # was removed in pandas 2.0) with direct timedelta arithmetic; flooring
    # total seconds matches the original days*24 + hours value.
    proc['start_time'] = pd.to_timedelta(proc['proc_time_from_admit']).dt.total_seconds() // 3600
    proc = proc[proc['start_time'] >= 0]
    proc['start_time'] = proc['start_time'].astype(int)

    ### Remove events recorded at or after discharge.
    proc = pd.merge(proc, self.data[['hadm_id', 'los']], on='hadm_id', how='left')
    proc = proc[proc['los'] - proc['start_time'] > 0]

    self.proc = proc
def generate_labs(self):
    """Stream preprocessed labs in chunks and keep in-window cohort events.

    `start_time` is whole hours (floored) from admission; events before
    admission or at/after discharge are dropped. Result stored in self.labs.
    """
    chunksize = 10000000
    chunks = []
    for labs in tqdm(pd.read_csv("./data/features/preproc_labs.csv.gz",
                                 compression='gzip', header=0, index_col=None,
                                 chunksize=chunksize)):
        labs = labs[labs['hadm_id'].isin(self.data['hadm_id'])]
        # Timedelta arithmetic replaces the removed positional-arg
        # str.split day/hour parsing; same floored-hours result.
        labs['start_time'] = pd.to_timedelta(labs['lab_time_from_admit']).dt.total_seconds() // 3600
        labs = labs[labs['start_time'] >= 0]
        labs['start_time'] = labs['start_time'].astype(int)

        ### Remove events recorded at or after discharge.
        labs = pd.merge(labs, self.data[['hadm_id', 'los']], on='hadm_id', how='left')
        labs = labs[labs['los'] - labs['start_time'] > 0]

        chunks.append(labs)

    # DataFrame.append was removed in pandas 2.0 — concatenate once instead
    # (also avoids quadratic re-copying per chunk).
    self.labs = pd.concat(chunks, ignore_index=True) if chunks else pd.DataFrame()
def generate_meds(self):
    """Load preprocessed medications with start/stop hour offsets.

    Keeps orders with positive duration that start before discharge;
    stop times running past discharge are clamped to the discharge hour.
    Result stored in self.meds with `dose_val_rx` coerced to numeric.
    """
    meds = pd.read_csv("./data/features/preproc_med.csv.gz",
                       compression='gzip', header=0, index_col=None)
    # Floored hours from admission for both endpoints. This replaces the
    # removed positional-arg str.split parsing and the confusing reuse of
    # the 'start_days'/'start_hours' scratch columns for the stop time.
    meds['start_time'] = pd.to_timedelta(meds['start_hours_from_admit']).dt.total_seconds() // 3600
    meds['stop_time'] = pd.to_timedelta(meds['stop_hours_from_admit']).dt.total_seconds() // 3600
    ##### Sanity check: keep only orders with positive duration.
    meds = meds[meds['stop_time'] - meds['start_time'] > 0]
    meds['start_time'] = meds['start_time'].astype(int)
    meds['stop_time'] = meds['stop_time'].astype(int)
    ##### Select hadm_id as in main file
    meds = meds[meds['hadm_id'].isin(self.data['hadm_id'])]
    meds = pd.merge(meds, self.data[['hadm_id', 'los']], on='hadm_id', how='left')

    ##### Remove orders starting at/after the end of the visit.
    meds = meds[meds['los'] - meds['start_time'] > 0]
    #### Any stop_time after end of visit is clamped to end of visit.
    meds.loc[meds['stop_time'] > meds['los'], 'stop_time'] = meds.loc[meds['stop_time'] > meds['los'], 'los']
    del meds['los']

    # Vectorized coercion; the original applied pd.to_numeric per element.
    meds['dose_val_rx'] = pd.to_numeric(meds['dose_val_rx'], errors='coerce')

    self.meds = meds
def mortality_length(self, include_time, predW):
    """Trim data to a fixed observation window for mortality prediction.

    Keeps admissions whose stay covers the observation window plus the
    prediction gap (`include_time + predW` hours), then restricts every
    enabled feature table to events inside the first `include_time` hours.
    Sets self.hids to the surviving admission ids and self.los to the
    window length. (Removed a redundant second `self.los = include_time`
    that the original also executed on exit.)
    """
    self.los = include_time
    self.data = self.data[(self.data['los'] >= include_time + predW)]
    self.hids = self.data['hadm_id'].unique()

    if self.feat_cond:
        self.cond = self.cond[self.cond['hadm_id'].isin(self.data['hadm_id'])]

    # Every surviving stay is now considered exactly `include_time` hours long.
    self.data['los'] = include_time

    ###MEDS
    if self.feat_med:
        self.meds = self.meds[self.meds['hadm_id'].isin(self.data['hadm_id'])]
        self.meds = self.meds[self.meds['start_time'] <= include_time]
        # Clamp administrations running past the window end.
        self.meds.loc[self.meds.stop_time > include_time, 'stop_time'] = include_time

    ###PROCS
    if self.feat_proc:
        self.proc = self.proc[self.proc['hadm_id'].isin(self.data['hadm_id'])]
        self.proc = self.proc[self.proc['start_time'] <= include_time]

    ###LAB
    if self.feat_lab:
        self.labs = self.labs[self.labs['hadm_id'].isin(self.data['hadm_id'])]
        self.labs = self.labs[self.labs['start_time'] <= include_time]
def los_length(self, include_time):
    """Trim data to the first `include_time` hours for LOS prediction.

    Drops admissions shorter than the window, records surviving ids in
    self.hids, pins every stay's `los` to the window length, and restricts
    each enabled feature table to events inside the window.
    """
    self.los = include_time
    self.data = self.data[self.data['los'] >= include_time]
    self.hids = self.data['hadm_id'].unique()
    kept = self.data['hadm_id']

    if self.feat_cond:
        self.cond = self.cond[self.cond['hadm_id'].isin(kept)]

    self.data['los'] = include_time

    ###MEDS
    if self.feat_med:
        meds = self.meds
        meds = meds[meds['hadm_id'].isin(kept)]
        meds = meds[meds['start_time'] <= include_time]
        # Clamp administrations running past the window end.
        meds.loc[meds['stop_time'] > include_time, 'stop_time'] = include_time
        self.meds = meds

    ###PROCS
    if self.feat_proc:
        proc = self.proc
        proc = proc[proc['hadm_id'].isin(kept)]
        self.proc = proc[proc['start_time'] <= include_time]

    ###LAB
    if self.feat_lab:
        labs = self.labs
        labs = labs[labs['hadm_id'].isin(kept)]
        self.labs = labs[labs['start_time'] <= include_time]
def readmission_length(self, include_time):
    """Keep the LAST `include_time` hours of each stay for readmission.

    Each admission's events are re-anchored so hour 0 is `select_time =
    los - include_time` into the stay; events ending before that point
    are dropped, and medication starts before it are clipped to 0.
    """
    self.los = include_time
    self.data = self.data[self.data['los'] >= include_time]
    self.hids = self.data['hadm_id'].unique()
    if self.feat_cond:
        self.cond = self.cond[self.cond['hadm_id'].isin(self.data['hadm_id'])]
    self.data['select_time'] = self.data['los'] - include_time
    self.data['los'] = include_time

    #### Make equal length input time series and remove data for pred window if needed

    def rebase(df):
        # Restrict to kept admissions and shift start_time so that the
        # window start becomes hour 0.
        df = df[df['hadm_id'].isin(self.data['hadm_id'])]
        df = pd.merge(df, self.data[['hadm_id', 'select_time']], on='hadm_id', how='left')
        df['start_time'] = df['start_time'] - df['select_time']
        return df

    ###MEDS
    if self.feat_med:
        meds = rebase(self.meds)
        meds['stop_time'] = meds['stop_time'] - meds['select_time']
        meds = meds[meds['stop_time'] >= 0]
        # Orders that began before the window are treated as starting at 0.
        meds.loc[meds['start_time'] < 0, 'start_time'] = 0
        self.meds = meds

    ###PROCS
    if self.feat_proc:
        proc = rebase(self.proc)
        self.proc = proc[proc['start_time'] >= 0]

    ###LABS
    if self.feat_lab:
        labs = rebase(self.labs)
        self.labs = labs[labs['start_time'] >= 0]
def smooth_meds(self, bucket):
    """Aggregate events into `bucket`-hour bins, then build the dictionaries.

    For each bin t in [0, los/bucket): meds are grouped per (hadm_id, drug)
    with max stop_time and mean dose, procs per (hadm_id, icd_code), labs
    per (hadm_id, itemid) with mean valuenum. Per-admission feature counts
    are recorded on self, then create_Dict is called to save everything.
    """
    med_parts, proc_parts, lab_parts = [], [], []

    if self.feat_med:
        self.meds = self.meds.sort_values(by=['start_time'])
    if self.feat_proc:
        self.proc = self.proc.sort_values(by=['start_time'])

    t = 0
    for i in tqdm(range(0, self.los, bucket)):
        ###MEDS
        if self.feat_med:
            sub_meds = (
                self.meds[(self.meds['start_time'] >= i) & (self.meds['start_time'] < i + bucket)]
                .groupby(['hadm_id', 'drug_name'])
                # 'mean' skips NaN exactly like np.nanmean; passing the numpy
                # function to .agg is deprecated in modern pandas.
                .agg({'stop_time': 'max', 'subject_id': 'max', 'dose_val_rx': 'mean'})
                .reset_index()
            )
            sub_meds['start_time'] = t
            sub_meds['stop_time'] = sub_meds['stop_time'] / bucket
            med_parts.append(sub_meds)

        ###PROC
        if self.feat_proc:
            sub_proc = (
                self.proc[(self.proc['start_time'] >= i) & (self.proc['start_time'] < i + bucket)]
                .groupby(['hadm_id', 'icd_code'])
                .agg({'subject_id': 'max'})
                .reset_index()
            )
            sub_proc['start_time'] = t
            proc_parts.append(sub_proc)

        ###LABS
        if self.feat_lab:
            sub_labs = (
                self.labs[(self.labs['start_time'] >= i) & (self.labs['start_time'] < i + bucket)]
                .groupby(['hadm_id', 'itemid'])
                .agg({'subject_id': 'max', 'valuenum': 'mean'})
                .reset_index()
            )
            sub_labs['start_time'] = t
            lab_parts.append(sub_labs)

        t = t + 1

    # DataFrame.append was removed in pandas 2.0 — collect the per-bucket
    # frames and concatenate once (also avoids quadratic copying).
    final_meds = pd.concat(med_parts) if med_parts else pd.DataFrame()
    final_proc = pd.concat(proc_parts) if proc_parts else pd.DataFrame()
    final_labs = pd.concat(lab_parts) if lab_parts else pd.DataFrame()

    los = int(self.los / bucket)

    ###MEDS: max distinct (drug, bucket) entries / rows per admission.
    if self.feat_med:
        f2_meds = final_meds.groupby(['hadm_id', 'drug_name']).size()
        self.med_per_adm = f2_meds.groupby('hadm_id').sum().reset_index()[0].max()
        self.medlength_per_adm = final_meds.groupby('hadm_id').size().max()

    ###PROC
    if self.feat_proc:
        f2_proc = final_proc.groupby(['hadm_id', 'icd_code']).size()
        self.proc_per_adm = f2_proc.groupby('hadm_id').sum().reset_index()[0].max()
        self.proclength_per_adm = final_proc.groupby('hadm_id').size().max()

    ###LABS
    if self.feat_lab:
        f2_labs = final_labs.groupby(['hadm_id', 'itemid']).size()
        self.labs_per_adm = f2_labs.groupby('hadm_id').sum().reset_index()[0].max()
        self.labslength_per_adm = final_labs.groupby('hadm_id').size().max()

    ###CREATE DICT
    print("[ PROCESSED TIME SERIES TO EQUAL TIME INTERVAL ]")
    self.create_Dict(final_meds, final_proc, final_labs, los)
def create_Dict(self, meds, proc, labs, los):
    """Assemble per-admission dictionaries and pickle them to ./data/dict.

    For every admission id in self.hids builds an entry with demographics,
    the label, and — per enabled feature — dense per-bucket series of
    length `los`:
      Med: 'signal' (0/1 drug active per bucket) and 'val' (masked dose),
      Proc: 0/1 occurrence per bucket,
      Lab: 'signal' (0/1 measured) and 'val' (imputed values),
      Cond: list of diagnosis codes ('<PAD>' when none).
    Also pickles the vocabularies and a metaDic of per-admission maxima,
    and records *_vocab sizes on self.
    """
    print("[ CREATING DATA DICTIONARIES ]")
    dataDic = {}
    # NOTE(review): labels_csv is built but never written anywhere — looks
    # like a leftover; kept to avoid changing behavior.
    labels_csv = pd.DataFrame(columns=['hadm_id', 'label'])
    labels_csv['hadm_id'] = pd.Series(self.hids)
    labels_csv['label'] = 0

    # Static demographics + label per admission. .iloc[0] replaces the
    # deprecated int(Series) scalar coercion.
    for hid in self.hids:
        grp = self.data[self.data['hadm_id'] == hid]
        dataDic[hid] = {
            'Cond': {}, 'Proc': {}, 'Med': {}, 'Lab': {},
            'ethnicity': grp['ethnicity'].iloc[0],
            'age': int(grp['Age'].iloc[0]),
            'gender': grp['gender'].iloc[0],
            'label': int(grp['label'].iloc[0]),
        }

    for hid in tqdm(self.hids):
        ###MEDS
        if self.feat_med:
            feat = meds['drug_name'].unique()
            df2 = meds[meds['hadm_id'] == hid]
            if df2.shape[0] == 0:
                # Admission with no meds: a zero frame is built here, but —
                # as in the original — dataDic[hid]['Med'] stays {}.
                val = pd.DataFrame(np.zeros([los, len(feat)]), columns=feat)
                val = val.fillna(0)
                val.columns = pd.MultiIndex.from_product([["MEDS"], val.columns])
            else:
                val = df2.pivot_table(index='start_time', columns='drug_name', values='dose_val_rx')
                df2 = df2.pivot_table(index='start_time', columns='drug_name', values='stop_time')
                # Pad to one row per bucket in [0, los).
                add_indices = pd.Index(range(los)).difference(df2.index)
                add_df = pd.DataFrame(index=add_indices, columns=df2.columns).fillna(np.nan)
                df2 = pd.concat([df2, add_df])
                df2 = df2.sort_index()
                df2 = df2.ffill()
                df2 = df2.fillna(0)

                val = pd.concat([val, add_df])
                val = val.sort_index()
                val = val.ffill()
                val = val.fillna(-1)

                # A drug is "on" in bucket t while its stop_time exceeds t;
                # explicit axis=0 (row-wise subtraction of the bucket index).
                df2.iloc[:, 0:] = df2.iloc[:, 0:].sub(df2.index, axis=0)
                df2[df2 > 0] = 1
                df2[df2 < 0] = 0
                # Dose is only meaningful while the drug is active.
                val.iloc[:, 0:] = df2.iloc[:, 0:] * val.iloc[:, 0:]
                dataDic[hid]['Med']['signal'] = df2.iloc[:, 0:].to_dict(orient="list")
                dataDic[hid]['Med']['val'] = val.iloc[:, 0:].to_dict(orient="list")

        ###PROCS
        if self.feat_proc:
            feat = proc['icd_code'].unique()
            df2 = proc[proc['hadm_id'] == hid]
            if df2.shape[0] == 0:
                df2 = pd.DataFrame(np.zeros([los, len(feat)]), columns=feat)
                df2 = df2.fillna(0)
                df2.columns = pd.MultiIndex.from_product([["PROC"], df2.columns])
            else:
                df2['val'] = 1
                df2 = df2.pivot_table(index='start_time', columns='icd_code', values='val')
                add_indices = pd.Index(range(los)).difference(df2.index)
                add_df = pd.DataFrame(index=add_indices, columns=df2.columns).fillna(np.nan)
                df2 = pd.concat([df2, add_df])
                df2 = df2.sort_index()
                df2 = df2.fillna(0)
                df2[df2 > 0] = 1
            dataDic[hid]['Proc'] = df2.to_dict(orient="list")

        ###LABS
        if self.feat_lab:
            feat = labs['itemid'].unique()
            df2 = labs[labs['hadm_id'] == hid]
            if df2.shape[0] == 0:
                # As for meds: zero frame built, Lab entry left {} (original
                # behavior preserved).
                val = pd.DataFrame(np.zeros([los, len(feat)]), columns=feat)
                val = val.fillna(0)
                val.columns = pd.MultiIndex.from_product([["LAB"], val.columns])
            else:
                val = df2.pivot_table(index='start_time', columns='itemid', values='valuenum')
                df2['val'] = 1
                df2 = df2.pivot_table(index='start_time', columns='itemid', values='val')
                add_indices = pd.Index(range(los)).difference(df2.index)
                add_df = pd.DataFrame(index=add_indices, columns=df2.columns).fillna(np.nan)
                df2 = pd.concat([df2, add_df])
                df2 = df2.sort_index()
                df2 = df2.fillna(0)

                val = pd.concat([val, add_df])
                val = val.sort_index()
                # Forward/backward fill then column statistic; any other
                # impute setting falls through to zero-fill only.
                if self.impute == 'Mean':
                    val = val.ffill()
                    val = val.bfill()
                    val = val.fillna(val.mean())
                elif self.impute == 'Median':
                    val = val.ffill()
                    val = val.bfill()
                    val = val.fillna(val.median())
                val = val.fillna(0)

                df2[df2 > 0] = 1
                df2[df2 < 0] = 0
                dataDic[hid]['Lab']['signal'] = df2.iloc[:, 0:].to_dict(orient="list")
                dataDic[hid]['Lab']['val'] = val.iloc[:, 0:].to_dict(orient="list")

        ##########COND#########
        if self.feat_cond:
            grp = self.cond[self.cond['hadm_id'] == hid]
            if grp.shape[0] == 0:
                dataDic[hid]['Cond'] = {'fids': list(['<PAD>'])}
            else:
                dataDic[hid]['Cond'] = {'fids': list(grp['new_icd_code'])}

    ######SAVE DICTIONARIES##############
    metaDic = {'Cond': {}, 'Proc': {}, 'Med': {}, 'Lab': {}, 'LOS': {}}
    metaDic['LOS'] = los
    with open("./data/dict/dataDic", 'wb') as fp:
        pickle.dump(dataDic, fp)

    with open("./data/dict/hadmDic", 'wb') as fp:
        pickle.dump(self.hids, fp)

    with open("./data/dict/ethVocab", 'wb') as fp:
        pickle.dump(list(self.data['ethnicity'].unique()), fp)
    self.eth_vocab = self.data['ethnicity'].nunique()

    with open("./data/dict/ageVocab", 'wb') as fp:
        pickle.dump(list(self.data['Age'].unique()), fp)
    self.age_vocab = self.data['Age'].nunique()

    with open("./data/dict/insVocab", 'wb') as fp:
        pickle.dump(list(self.data['insurance'].unique()), fp)
    self.ins_vocab = self.data['insurance'].nunique()

    if self.feat_med:
        with open("./data/dict/medVocab", 'wb') as fp:
            pickle.dump(list(meds['drug_name'].unique()), fp)
        self.med_vocab = meds['drug_name'].nunique()
        metaDic['Med'] = self.med_per_adm

    if self.feat_cond:
        with open("./data/dict/condVocab", 'wb') as fp:
            pickle.dump(list(self.cond['new_icd_code'].unique()), fp)
        self.cond_vocab = self.cond['new_icd_code'].nunique()
        metaDic['Cond'] = self.cond_per_adm

    if self.feat_proc:
        with open("./data/dict/procVocab", 'wb') as fp:
            pickle.dump(list(proc['icd_code'].unique()), fp)
        # NOTE(review): unlike med/cond (counts via nunique), this stores the
        # array of codes; kept as-is since downstream may rely on the type.
        self.proc_vocab = proc['icd_code'].unique()
        metaDic['Proc'] = self.proc_per_adm

    if self.feat_lab:
        with open("./data/dict/labsVocab", 'wb') as fp:
            pickle.dump(list(labs['itemid'].unique()), fp)
        # NOTE(review): same inconsistency as proc_vocab — array, not a count.
        self.lab_vocab = labs['itemid'].unique()
        metaDic['Lab'] = self.labs_per_adm

    with open("./data/dict/metaDic", 'wb') as fp:
        pickle.dump(metaDic, fp)