# appStore/target.py -- "target" page of the cpu_tracs policy-analysis Streamlit app.
# (Hugging Face Space page residue removed; original commit: 8f06cde, "target check".)
# set path
import glob, os, sys;
sys.path.append('../utils')
#import needed libraries
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
from utils.target_classifier import load_targetClassifier, target_classification
import logging
logger = logging.getLogger(__name__)
from utils.config import get_classifier_params
from io import BytesIO
import xlsxwriter
import plotly.express as px
# --- module-level configuration --------------------------------------------
# Key used to look up this page's settings in the shared classifier config,
# and the prefix for its session-state entries.
classifier_identifier = 'target'
params = get_classifier_params(classifier_identifier)

# Maps raw model output labels to the human-readable names shown in the UI.
_lab_dict = dict(
    NEGATIVE='NO TARGET INFO',
    TARGET='TARGET',
)
@st.cache_data
def to_excel(df):
    """Serialize *df* to an in-memory XLSX file with validation dropdowns.

    Appends five reviewer-feedback columns (all defaulted to 'No') and
    attaches a Yes/No/Discard dropdown list to each of them, then returns
    the workbook bytes, suitable for ``st.download_button``.

    Parameters
    ----------
    df : pd.DataFrame
        Classification results. The caller's frame is not modified.

    Returns
    -------
    bytes
        The rendered .xlsx file content.
    """
    # Work on a copy so the caller's DataFrame is not mutated in place
    # (the original appended the columns directly to the argument).
    df = df.copy()
    validation_cols = ['Target Validation', 'Netzero Validation',
                       'GHG Validation', 'Adapt-Mitig Validation', 'Sector']
    for col in validation_cols:
        df[col] = 'No'
    len_df = len(df)
    output = BytesIO()
    # Context manager replaces ExcelWriter.save(), which was deprecated in
    # pandas 1.5 and removed in 2.0.
    with pd.ExcelWriter(output, engine='xlsxwriter') as writer:
        df.to_excel(writer, index=False, sheet_name='Sheet1')
        worksheet = writer.sheets['Sheet1']
        # Data rows start at spreadsheet row 2 (row 1 is the header), so the
        # last data row is len_df + 1.  The five new columns are assumed to
        # land in spreadsheet columns L..P -- TODO confirm the upstream frame
        # always has exactly 11 columns ahead of them.
        for col_letter in ('L', 'M', 'N', 'O', 'P'):
            # Bug fix: the original used mixed ranges like 'M2:L{n}' (start
            # and end in different columns) and stopped one row short of the
            # last data row.
            worksheet.data_validation(
                '{0}2:{0}{1}'.format(col_letter, len_df + 1),
                {'validate': 'list',
                 'source': ['No', 'Yes', 'Discard']})
    processed_data = output.getvalue()
    return processed_data
def app():
    """Run the target-classification step for the uploaded document.

    Reads the paragraph DataFrame stored by the upload page under
    ``st.session_state['key0']``, loads the configured target classifier,
    classifies every paragraph, and stores the annotated frame under
    ``st.session_state['key1']`` for the display step.
    """
    with st.container():
        # Nothing to do until the upload page has stored the paragraphs.
        if 'key0' not in st.session_state:
            return

        df = st.session_state.key0

        # Load the classifier and expose it via session state so other
        # steps can reuse the same instance.
        classifier = load_targetClassifier(classifier_name=params['model_name'])
        st.session_state['{}_classifier'.format(classifier_identifier)] = classifier

        # NOTE(review): warning_msg is computed but never displayed in the
        # original code; kept as-is for behavioral parity.
        if len(df) > 100:
            warning_msg = ": This might take sometime, please sit back and relax."
        else:
            warning_msg = ""

        df = target_classification(haystack_doc=df,
                                   threshold=params['threshold'])
        st.session_state.key1 = df
def target_display():
    """Render the paragraphs classified as containing target information.

    Expects ``app()`` to have stored the classified DataFrame under
    ``st.session_state['key1']``; renders nothing otherwise.
    """
    if 'key1' not in st.session_state:
        return
    results = st.session_state.key1
    # Keep only the rows the classifier flagged as TARGET and show them.
    target_hits = results[results['Target Label'] == 'TARGET']
    st.table(target_hits)
    # NOTE(review): a richer display (label counts, netzero highlight,
    # top-5 paragraphs, Excel download) exists in the git history but is
    # currently disabled.