import appStore.target as target_extraction
import appStore.netzero as netzero
import appStore.sector as sector
import appStore.adapmit as adapmit
import appStore.ghg as ghg
import appStore.doc_processing as processing
from utils.uploadAndExample import add_upload
import streamlit as st
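# Each appStore module exposes an `app()` callable that runs one stage of the
# pipeline (see the `apps` list below); intermediate results are assumed to be
# shared between the stages via `st.session_state`.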

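# Basic page configuration: wide layout with the sidebar expanded by default.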
st.set_page_config(page_title='Climate Policy Intelligence',
                   initial_sidebar_state='expanded', layout='wide')

with st.sidebar:
    # Let the user either upload a document or load the example document
    choice = st.radio(label='Select the Document',
                      help='You can upload a document or try the example document',
                      options=('Upload Document', 'Try Example'),
                      horizontal=True)
    add_upload(choice)

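# Centered page title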
with st.container():
    st.markdown("<h2 style='text-align: center; color: black;'> Climate Policy Intelligence App </h2>", unsafe_allow_html=True)
    st.write(' ')

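# Collapsible "About" section describing the processing pipeline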
with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
        The Climate Policy Understanding App is an open-source digital tool \
        which aims to assist policy analysts and other users in extracting \
        and filtering relevant information from public documents.

        What happens in the background?

        - Step 1: Once the document is provided to the app, it undergoes \
        *pre-processing*: the document is broken into smaller paragraphs \
        (based on word/sentence count).
        - Step 2: Each paragraph is fed to the **Target Classifier**, which \
        detects whether the paragraph contains any *target*-related information.
        - Step 3: Paragraphs that do contain target-related information are then \
        fed to multiple classifiers to enrich the information extraction.

        Classifiers
        - Netzero: 

        """)
    st.write("")
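
# Pipeline stages, run in order: document pre-processing, target extraction,
# then the enrichment classifiers (netzero, ghg, sector, adapmit).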
apps = [processing.app, target_extraction.app, netzero.app, ghg.app,
        sector.app, adapmit.app]
multiplier_val = 1.0 / len(apps)
if st.button("Get the work done"):
    prg = st.progress(0.0)
    for i, func in enumerate(apps):
        func()
        # st.progress expects a float in [0.0, 1.0] (or an int in [0, 100]), so
        # report the completed fraction rather than a percentage value.
        prg.progress((i + 1) * multiplier_val)

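    # Once all stages have run, display the extracted target paragraphs; 'key1'
    # is assumed to be written to st.session_state by the target extraction stage.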
    if 'key1' in st.session_state:
        target_extraction.target_display()
        st.write(st.session_state.key1)