ppsingh committed on
Commit
75070a9
1 Parent(s): 3cad106

Update appStore/target.py

Browse files
Files changed (1) hide show
  1. appStore/target.py +1 -351
appStore/target.py CHANGED
@@ -27,90 +27,6 @@ from pandas.api.types import (
27
  classifier_identifier = 'target'
28
  params = get_classifier_params(classifier_identifier)
29
 
30
- ## Labels dictionary ###
31
- _lab_dict = {
32
- 'NEGATIVE':'NO TARGET INFO',
33
- 'TARGET':'TARGET',
34
- }
35
-
36
- # @st.cache_data
37
- def to_excel(df):
38
- # df['Target Validation'] = 'No'
39
- # df['Netzero Validation'] = 'No'
40
- # df['GHG Validation'] = 'No'
41
- # df['Adapt-Mitig Validation'] = 'No'
42
- # df['Sector'] = 'No'
43
- len_df = len(df)
44
- output = BytesIO()
45
- writer = pd.ExcelWriter(output, engine='xlsxwriter')
46
- df.to_excel(writer, index=False, sheet_name='rawdata')
47
- if 'target_hits' in st.session_state:
48
- target_hits = st.session_state['target_hits']
49
- if 'keep' in target_hits.columns:
50
-
51
- target_hits = target_hits[target_hits.keep == True]
52
- target_hits = target_hits.reset_index(drop=True)
53
- target_hits.drop(columns = ['keep'], inplace=True)
54
- target_hits.to_excel(writer,index=False,sheet_name = 'Target')
55
- else:
56
-
57
- target_hits = target_hits.sort_values(by=['Target Score'], ascending=False)
58
- target_hits = target_hits.reset_index(drop=True)
59
- target_hits.to_excel(writer,index=False,sheet_name = 'Target')
60
-
61
- else:
62
- target_hits = df[df['Target Label'] == True]
63
- target_hits.drop(columns=['Target Label','Netzero Score','GHG Score','Action Label',
64
- 'Action Score','Policies_Plans Label','Indicator Label',
65
- 'Policies_Plans Score','Conditional Score'],inplace=True)
66
- target_hits = target_hits.sort_values(by=['Target Score'], ascending=False)
67
- target_hits = target_hits.reset_index(drop=True)
68
- target_hits.to_excel(writer,index=False,sheet_name = 'Target')
69
-
70
-
71
- if 'action_hits' in st.session_state:
72
- action_hits = st.session_state['action_hits']
73
- if 'keep' in action_hits.columns:
74
- action_hits = action_hits[action_hits.keep == True]
75
- action_hits = action_hits.reset_index(drop=True)
76
- action_hits.drop(columns = ['keep'], inplace=True)
77
- action_hits.to_excel(writer,index=False,sheet_name = 'Action')
78
- else:
79
- action_hits = action_hits.sort_values(by=['Action Score'], ascending=False)
80
- action_hits = action_hits.reset_index(drop=True)
81
- action_hits.to_excel(writer,index=False,sheet_name = 'Action')
82
- else:
83
- action_hits = df[df['Action Label'] == True]
84
- action_hits.drop(columns=['Target Label','Target Score','Netzero Score',
85
- 'Netzero Label','GHG Label',
86
- 'GHG Score','Action Label','Policies_Plans Label',
87
- 'Policies_Plans Score','Conditional Score'],inplace=True)
88
- action_hits = action_hits.sort_values(by=['Action Score'], ascending=False)
89
- action_hits = action_hits.reset_index(drop=True)
90
- action_hits.to_excel(writer,index=False,sheet_name = 'Action')
91
-
92
- # hits = hits.drop(columns = ['Target Score','Netzero Score','GHG Score'])
93
- workbook = writer.book
94
- # worksheet = writer.sheets['Sheet1']
95
- # worksheet.data_validation('L2:L{}'.format(len_df),
96
- # {'validate': 'list',
97
- # 'source': ['No', 'Yes', 'Discard']})
98
- # worksheet.data_validation('M2:L{}'.format(len_df),
99
- # {'validate': 'list',
100
- # 'source': ['No', 'Yes', 'Discard']})
101
- # worksheet.data_validation('N2:L{}'.format(len_df),
102
- # {'validate': 'list',
103
- # 'source': ['No', 'Yes', 'Discard']})
104
- # worksheet.data_validation('O2:L{}'.format(len_df),
105
- # {'validate': 'list',
106
- # 'source': ['No', 'Yes', 'Discard']})
107
- # worksheet.data_validation('P2:L{}'.format(len_df),
108
- # {'validate': 'list',
109
- # 'source': ['No', 'Yes', 'Discard']})
110
- writer.save()
111
- processed_data = output.getvalue()
112
- return processed_data
113
-
114
  def app():
115
  ### Main app code ###
116
  with st.container():
@@ -127,272 +43,6 @@ def app():
127
 
128
  df = target_classification(haystack_doc=df,
129
  threshold= params['threshold'])
130
- st.dataframe(df)
131
  st.session_state.key1 = df
132
 
133
- def filter_for_tracs(df):
134
- sector_list = ['Transport','Energy','Economy-wide']
135
- df['check'] = df['Sector Label'].apply(lambda x: any(i in x for i in sector_list))
136
- df = df[df.check == True].reset_index(drop=True)
137
- df['Sector Label'] = df['Sector Label'].apply(lambda x: [i for i in x if i in sector_list])
138
- df.drop(columns = ['check'],inplace=True)
139
- return df
140
-
141
- def target_display():
142
- if 'key1' in st.session_state:
143
- df = st.session_state.key1
144
- st.caption(""" **{}** is splitted into **{}** paragraphs/text chunks."""\
145
- .format(os.path.basename(st.session_state['filename']),
146
- len(df)))
147
- hits = df[df['Target Label'] == 'TARGET'].reset_index(drop=True)
148
- range_val = min(5,len(hits))
149
- if range_val !=0:
150
- # collecting some statistics
151
- count_target = sum(hits['Target Label'] == 'TARGET')
152
- count_netzero = sum(hits['Netzero Label'] == 'NETZERO TARGET')
153
- count_ghg = sum(hits['GHG Label'] == 'GHG')
154
- count_transport = sum([True if 'Transport' in x else False
155
- for x in hits['Sector Label']])
156
-
157
- c1, c2 = st.columns([1,1])
158
- with c1:
159
- st.write('**Target Paragraphs**: `{}`'.format(count_target))
160
- st.write('**NetZero Related Paragraphs**: `{}`'.format(count_netzero))
161
- with c2:
162
- st.write('**GHG Target Related Paragraphs**: `{}`'.format(count_ghg))
163
- st.write('**Transport Related Paragraphs**: `{}`'.format(count_transport))
164
- # st.write('-------------------')
165
- hits.drop(columns=['Target Label','Netzero Score','GHG Score','Action Label',
166
- 'Action Score','Policies_Plans Label','Indicator Label',
167
- 'Policies_Plans Score','Conditional Score'],inplace=True)
168
- hits = hits.sort_values(by=['Target Score'], ascending=False)
169
- hits = hits.reset_index(drop=True)
170
-
171
- # netzerohit = hits[hits['Netzero Label'] == 'NETZERO']
172
- # if not netzerohit.empty:
173
- # netzerohit = netzerohit.sort_values(by = ['Netzero Score'], ascending = False)
174
- # # st.write('-------------------')
175
- # # st.markdown("###### Netzero paragraph ######")
176
- # st.write('**Netzero paragraph** `page {}`: {}'.format(netzerohit.iloc[0]['page'],
177
- # netzerohit.iloc[0]['text'].replace("\n", " ")))
178
- # st.write("")
179
- # else:
180
- # st.info("🤔 No Netzero paragraph found")
181
-
182
- # # st.write("**Result {}** `page {}` (Relevancy Score: {:.2f})'".format(i+1,hits.iloc[i]['page'],hits.iloc[i]['Relevancy'])")
183
- # st.write('-------------------')
184
- # st.markdown("###### Top few Target Classified paragraph/text results ######")
185
- # range_val = min(5,len(hits))
186
- # for i in range(range_val):
187
- # # the page number reflects the page that contains the main paragraph
188
- # # according to split limit, the overlapping part can be on a separate page
189
- # st.write('**Result {}** (Relevancy Score: {:.2f}): `page {}`, `Sector: {}`,\
190
- # `GHG: {}`, `Adapt-Mitig :{}`'\
191
- # .format(i+1,hits.iloc[i]['Relevancy'],
192
- # hits.iloc[i]['page'], hits.iloc[i]['Sector Label'],
193
- # hits.iloc[i]['GHG Label'],hits.iloc[i]['Adapt-Mitig Label']))
194
- # st.write("\t Text: \t{}".format(hits.iloc[i]['text'].replace("\n", " ")))
195
- # hits = hits.reset_index(drop =True)
196
- st.write('----------------')
197
-
198
-
199
- st.caption("Filter table to select rows to keep for Target category")
200
- hits = filter_for_tracs(hits)
201
- convert_type = {'Netzero Label': 'category',
202
- 'Conditional Label':'category',
203
- 'GHG Label':'category',
204
- }
205
- hits = hits.astype(convert_type)
206
- filter_dataframe(hits)
207
-
208
- # filtered_df = filtered_df[filtered_df.keep == True]
209
- # st.write('Explore the data')
210
- # AgGrid(hits)
211
-
212
-
213
- with st.sidebar:
214
- st.write('-------------')
215
- df_xlsx = to_excel(df)
216
- st.download_button(label='📥 Download Result',
217
- data=df_xlsx ,
218
- file_name= os.path.splitext(os.path.basename(st.session_state['filename']))[0]+'.xlsx')
219
-
220
- # st.write(
221
- # """This app accomodates the blog [here](https://blog.streamlit.io/auto-generate-a-dataframe-filtering-ui-in-streamlit-with-filter_dataframe/)
222
- # and walks you through one example of how the Streamlit
223
- # Data Science Team builds add-on functions to Streamlit.
224
- # """
225
- # )
226
-
227
-
228
- def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:
229
- """
230
- Adds a UI on top of a dataframe to let viewers filter columns
231
-
232
- Args:
233
- df (pd.DataFrame): Original dataframe
234
-
235
- Returns:
236
- pd.DataFrame: Filtered dataframe
237
- """
238
- modify = st.checkbox("Add filters")
239
-
240
- if not modify:
241
- st.session_state['target_hits'] = df
242
- return
243
-
244
-
245
- # df = df.copy()
246
- # st.write(len(df))
247
-
248
- # Try to convert datetimes into a standard format (datetime, no timezone)
249
- # for col in df.columns:
250
- # if is_object_dtype(df[col]):
251
- # try:
252
- # df[col] = pd.to_datetime(df[col])
253
- # except Exception:
254
- # pass
255
-
256
- # if is_datetime64_any_dtype(df[col]):
257
- # df[col] = df[col].dt.tz_localize(None)
258
-
259
- modification_container = st.container()
260
-
261
- with modification_container:
262
- cols = list(set(df.columns) -{'page','Extracted Text'})
263
- cols.sort()
264
- to_filter_columns = st.multiselect("Filter dataframe on", cols
265
- )
266
- for column in to_filter_columns:
267
- left, right = st.columns((1, 20))
268
- left.write("↳")
269
- # Treat columns with < 10 unique values as categorical
270
- if is_categorical_dtype(df[column]):
271
- # st.write(type(df[column][0]), column)
272
- user_cat_input = right.multiselect(
273
- f"Values for {column}",
274
- df[column].unique(),
275
- default=list(df[column].unique()),
276
- )
277
- df = df[df[column].isin(user_cat_input)]
278
- elif is_numeric_dtype(df[column]):
279
- _min = float(df[column].min())
280
- _max = float(df[column].max())
281
- step = (_max - _min) / 100
282
- user_num_input = right.slider(
283
- f"Values for {column}",
284
- _min,
285
- _max,
286
- (_min, _max),
287
- step=step,
288
- )
289
- df = df[df[column].between(*user_num_input)]
290
- elif is_list_like(df[column]) & (type(df[column][0]) == list) :
291
- list_vals = set(x for lst in df[column].tolist() for x in lst)
292
- user_multi_input = right.multiselect(
293
- f"Values for {column}",
294
- list_vals,
295
- default=list_vals,
296
- )
297
- df['check'] = df[column].apply(lambda x: any(i in x for i in user_multi_input))
298
- df = df[df.check == True]
299
- df.drop(columns = ['check'],inplace=True)
300
-
301
- # df[df[column].between(*user_num_input)]
302
- # elif is_datetime64_any_dtype(df[column]):
303
- # user_date_input = right.date_input(
304
- # f"Values for {column}",
305
- # value=(
306
- # df[column].min(),
307
- # df[column].max(),
308
- # ),
309
- # )
310
- # if len(user_date_input) == 2:
311
- # user_date_input = tuple(map(pd.to_datetime, user_date_input))
312
- # start_date, end_date = user_date_input
313
- # df = df.loc[df[column].between(start_date, end_date)]
314
- else:
315
- user_text_input = right.text_input(
316
- f"Substring or regex in {column}",
317
- )
318
- if user_text_input:
319
- df = df[df[column].str.lower().str.contains(user_text_input)]
320
-
321
- df = df.reset_index(drop=True)
322
-
323
- st.session_state['target_hits'] = df
324
- df['IKI_Netzero'] = df.apply(lambda x: 'T_NETZERO' if ((x['Netzero Label'] == 'NETZERO TARGET') &
325
- (x['Conditional Label'] == 'UNCONDITIONAL'))
326
- else 'T_NETZERO_C' if ((x['Netzero Label'] == 'NETZERO TARGET') &
327
- (x['Conditional Label'] == 'CONDITIONAL')
328
- )
329
- else None, axis=1
330
- )
331
- def check_t(s,c):
332
- temp = []
333
- if (('Transport' in s) & (c== 'UNCONDITIONAL')):
334
- temp.append('T_Transport_Unc')
335
- if (('Transport' in s) & (c == 'CONDITIONAL')):
336
- temp.append('T_Transport_C')
337
- if (('Economy-wide' in s) & (c == 'CONDITIONAL')):
338
- temp.append('T_Economy_C')
339
- if (('Economy-wide' in s) & (c == 'UNCONDITIONAL')):
340
- temp.append('T_Economy_Unc')
341
- if (('Energy' in s) & (c == 'CONDITIONAL')):
342
- temp.append('T_Energy_C')
343
- if (('Energy' in s) & (c == 'UNCONDITIONAL')):
344
- temp.append('T_Economy_Unc')
345
- return temp
346
- df['IKI_Target'] = df.apply(lambda x:check_t(x['Sector Label'], x['Conditional Label']),
347
- axis=1 )
348
-
349
- # target_hits = st.session_state['target_hits']
350
- df['keep'] = True
351
-
352
-
353
- df = df[['text','IKI_Netzero','IKI_Target','Target Score','Netzero Label','GHG Label',
354
- 'Conditional Label','Sector Label','Adapt-Mitig Label','page','keep']]
355
- st.dataframe(df)
356
- # df = st.data_editor(
357
- # df,
358
- # column_config={
359
- # "keep": st.column_config.CheckboxColumn(
360
- # help="Select which rows to keep",
361
- # default=False,
362
- # )
363
- # },
364
- # disabled=list(set(df.columns) - {'keep'}),
365
- # hide_index=True,
366
- # )
367
- # st.write("updating target hits....")
368
- # st.write(len(df[df.keep == True]))
369
- st.session_state['target_hits'] = df
370
-
371
- return
372
-
373
-
374
- # df = pd.read_csv(
375
- # "https://raw.githubusercontent.com/mcnakhaee/palmerpenguins/master/palmerpenguins/data/penguins.csv"
376
- # )
377
-
378
-
379
- # else:
380
- # st.info("🤔 No Targets found")
381
- # count_df = df['Target Label'].value_counts()
382
- # count_df = count_df.rename('count')
383
- # count_df = count_df.rename_axis('Target Label').reset_index()
384
- # count_df['Label_def'] = count_df['Target Label'].apply(lambda x: _lab_dict[x])
385
- # st.plotly_chart(fig,use_container_width= True)
386
-
387
- # count_netzero = sum(hits['Netzero Label'] == 'NETZERO')
388
- # count_ghg = sum(hits['GHG Label'] == 'LABEL_2')
389
- # count_economy = sum([True if 'Economy-wide' in x else False
390
- # for x in hits['Sector Label']])
391
- # # excel part
392
- # temp = df[df['Relevancy']>threshold]
393
-
394
- # df['Validation'] = 'No'
395
- # df_xlsx = to_excel(df)
396
- # st.download_button(label='📥 Download Current Result',
397
- # data=df_xlsx ,
398
- # file_name= 'file_target.xlsx')
 
27
  classifier_identifier = 'target'
28
  params = get_classifier_params(classifier_identifier)
29
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
  def app():
31
  ### Main app code ###
32
  with st.container():
 
43
 
44
  df = target_classification(haystack_doc=df,
45
  threshold= params['threshold'])
46
+ st.write(df)
47
  st.session_state.key1 = df
48