# set path
import glob, os, sys
sys.path.append('../utils')

# import needed libraries
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import streamlit as st
from utils.target_classifier import load_targetClassifier, target_classification
import logging
logger = logging.getLogger(__name__)
from utils.config import get_classifier_params
from io import BytesIO
import xlsxwriter
import plotly.express as px

# Declare all the necessary variables
classifier_identifier = 'target'
params = get_classifier_params(classifier_identifier)

### Labels dictionary ###
_lab_dict = {
    'NEGATIVE': 'NO TARGET INFO',
    'TARGET': 'TARGET',
}


@st.cache_data
def to_excel(df):
    """Convert the results dataframe to an Excel file (bytes) with a
    'No/Yes/Discard' validation dropdown in column E for manual review."""
    len_df = len(df)
    output = BytesIO()
    writer = pd.ExcelWriter(output, engine='xlsxwriter')
    df.to_excel(writer, index=False, sheet_name='Sheet1')
    workbook = writer.book
    worksheet = writer.sheets['Sheet1']
    # Data rows start at row 2 (row 1 holds the header), so the validation
    # range must run through row len_df + 1.
    worksheet.data_validation('E2:E{}'.format(len_df + 1),
                              {'validate': 'list',
                               'source': ['No', 'Yes', 'Discard']})
    # writer.save() was removed in pandas 2.0; close() writes and releases the buffer.
    writer.close()
    processed_data = output.getvalue()
    return processed_data


def app():
    #### APP INFO #####
    # st.write(
    #     """
    #     The **Target Extraction** app is an easy-to-use interface built
    #     in Streamlit for analyzing policy documents and classifying whether
    #     the paragraphs/texts in the document contain any Economy-Wide
    #     Targets related information - developed by GIZ Data Service Center,
    #     GFA, IKI Tracs, SV Klima and SPA.
    #     """)

    ### Main app code ###
    with st.container():
        if 'key0' in st.session_state:
            df = st.session_state.key0

            # load classifier
            classifier = load_targetClassifier(classifier_name=params['model_name'])
            st.session_state['{}_classifier'.format(classifier_identifier)] = classifier

            # note: warning_msg is prepared here but not displayed in this version
            if len(df) > 100:
                warning_msg = ": This might take some time, please sit back and relax."
            else:
                warning_msg = ""

            df = target_classification(haystack_doc=df,
                                       threshold=params['threshold'])
            st.session_state.key1 = df

            # # excel part
            # temp = df[df['Relevancy'] > threshold]
            # df['Validation'] = 'No'
            # df_xlsx = to_excel(df)
            # st.download_button(label='📥 Download Current Result',
            #                    data=df_xlsx,
            #                    file_name='file_target.xlsx')


def target_display():
    """Render the classification results stored in st.session_state.key1:
    a label-count bar chart plus the top 'TARGET' paragraphs by relevancy."""
    if 'key1' in st.session_state:
        df = st.session_state.key1
        hits = df[df['Target Label'] == 'TARGET']
        range_val = min(5, len(hits))
        if range_val != 0:
            # label counts for the overview chart
            count_df = df['Target Label'].value_counts()
            count_df = count_df.rename('count')
            count_df = count_df.rename_axis('Target Label').reset_index()
            count_df['Label_def'] = count_df['Target Label'].apply(lambda x: _lab_dict[x])
            fig = px.bar(count_df, y="Label_def", x="count", orientation='h', height=200)
            c1, c2 = st.columns([1, 1])
            with c1:
                st.plotly_chart(fig, use_container_width=True)

            hits = hits.sort_values(by=['Relevancy'], ascending=False)
            st.write("")
            st.markdown("###### Top Economy-Wide Target classified paragraph/text results ######")
            for i in range(range_val):
                # the page number reflects the page that contains the main paragraph;
                # depending on the split limit, the overlapping part can be on a separate page
                st.write('**Result {}** `page {}` (Relevancy Score: {:.2f})'.format(
                    i + 1, hits.iloc[i]['page'], hits.iloc[i]['Relevancy']))
                st.write("\t Text: \t{}".format(hits.iloc[i]['text'].replace("\n", " ")))
        else:
            st.info("🤔 No Targets found")
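

# Usage sketch (an assumption, not part of the original module): in the kind of
# multipage Streamlit setup this file appears to belong to, a main page would
# typically import this module and call its two entry points after the uploaded
# document has been split into paragraphs and stored in st.session_state.key0.
# The module path 'appStore.target' below is hypothetical.
#
#   from appStore import target
#   target.app()              # runs the classifier and stores results in key1
#   target.target_display()   # renders the label chart and top paragraphs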