import streamlit as st
import os
import pkg_resources

# Using this wacky hack to get around the massively ridiculous managed env loading order
def is_installed(package_name, version):
    try:
        pkg = pkg_resources.get_distribution(package_name)
        return pkg.version == version
    except pkg_resources.DistributionNotFound:
        return False

# Shifted from below: st.set_page_config must be the first Streamlit call, otherwise Streamlit raises an error
st.set_page_config(page_title = 'Vulnerability Analysis', 
                   initial_sidebar_state='expanded', layout="wide") 

@st.cache_resource # cache the function so it's not called every time app.py is triggered
def install_packages():
    install_commands = []

    if not is_installed("spaces", "0.12.0"):
        install_commands.append("pip install spaces==0.17.0")
    
    if not is_installed("pydantic", "1.8.2"):
        install_commands.append("pip install pydantic==1.8.2")

    if not is_installed("typer", "0.4.0"):
        install_commands.append("pip install typer==0.4.0")

    if install_commands:
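        # Chain the installs with "&&" so a failed install short-circuits the
        # remaining ones; os.system passes the whole string to the shell.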
        os.system(" && ".join(install_commands))

# install packages if necessary
install_packages()
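# Note: the heavy app imports below are deferred until after install_packages(),
# so they resolve against the pinned package versions installed above.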


import appStore.vulnerability_analysis as vulnerability_analysis
import appStore.doc_processing as processing
from utils.uploadAndExample import add_upload
from utils.vulnerability_classifier import label_dict
import pandas as pd
import plotly.express as px

#st.set_page_config(page_title = 'Vulnerability Analysis', 
 #                  initial_sidebar_state='expanded', layout="wide") 

with st.sidebar:
    # upload and example doc
    choice = st.sidebar.radio(label = 'Select the Document',
                            help = 'You can upload a document \
                            or try an example document', 
                            options = ('Upload Document', 'Try Example'), 
                            horizontal = True)
    add_upload(choice) 

with st.container():
    st.markdown("<h2 style='text-align: center; color: black;'> Vulnerability Analysis 2.0 </h2>", unsafe_allow_html=True)
    st.write(' ')

with st.expander("ℹ️ - About this app", expanded=False):
    st.write(
        """
        The Vulnerability Analysis App is an open-source \
        digital tool that aims to assist policy analysts and \
        other users in extracting and filtering references \
        to different groups in vulnerable situations from public documents. \
        We use Natural Language Processing (NLP), specifically deep \
        learning-based text representations, to search context-sensitively \
        for mentions of the special needs of groups in vulnerable situations \
        and to cluster them thematically.
        """)
    
    #st.write('**Definitions**')

    # st.caption("""
    #         - **Target**: Targets are an intention to achieve a specific result, \
    #         for example, to reduce GHG emissions to a specific level \
    #         (a GHG target) or increase energy efficiency or renewable \
    #         energy to a specific level (a non-GHG target), typically by \ 
    #         a certain date.
    #         - **Economy-wide Target**: Certain targets are applicable \
    #             not at a specific sector level but \
    #             economy-wide.
    #         - **Netzero**: Identifies whether it is a Netzero target or not.
    #             - 'NET-ZERO': target_labels = ['T_Netzero','T_Netzero_C']
    #             - 'Non Netzero Target': target_labels_neg = ['T_Economy_C',
    #               'T_Economy_Unc','T_Adaptation_C','T_Adaptation_Unc','T_Transport_C',
    #               'T_Transport_O_C','T_Transport_O_Unc','T_Transport_Unc']
    #             - 'Others': Other targets besides those covered above
    #         - **GHG Target**: GHG targets refer to contributions framed as targeted \
    #                           outcomes in GHG terms.
    #             - 'GHG': target_labels_ghg_yes = ['T_Transport_Unc','T_Transport_C']
    #             - 'NON GHG TRANSPORT TARGET': target_labels_ghg_no = ['T_Adaptation_Unc',\
    #                'T_Adaptation_C', 'T_Transport_O_Unc', 'T_Transport_O_C']
    #             - 'OTHERS': Other targets besides those covered above.
    #         - **Conditionality**: An “unconditional contribution” is what countries \
    #          could implement without any conditions and based on their own \
    #          resources and capabilities. A “conditional contribution” is one \
    #          that countries would undertake if international means of support \
    #          are provided, or other conditions are met.
    #         - **Action**: Actions are an intention to implement specific means of \
    #          achieving GHG reductions, usually in forms of concrete projects.
    #         - **Policies and Plans**: Policies are domestic planning documents \
    #           such as policies, regulations or guidelines, and Plans are broader \
    #          than specific policies or actions, such as a general intention \ 
    #          to ‘improve efficiency’, ‘develop renewable energy’, etc. \
    #         The terms come from the World Bank's NDC platform and WRI's publication.
    #           """)
    
    #c1, c2, c3 =  st.columns([12,1,10])
    #with c1:
    #    image = Image.open('docStore/img/flow.jpg') 
    #    st.image(image)
    #with c3:
    
    st.write("""
        What happens in the background?
        
        - Step 1: Once the document is provided to the app, it undergoes *pre-processing*. \
        In this step the document is broken into smaller paragraphs \
        (based on word/sentence count).
        - Step 2: The paragraphs are then fed to the **Vulnerability Classifier**, which detects whether
        a paragraph contains one or multiple references to vulnerable groups.
        """)
                  
    st.write("")

# Define the apps used
apps = [processing.app, vulnerability_analysis.app]
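# Each app() call below is expected to write its results into st.session_state
# (e.g. 'key0', read further down), so the rest of the page can render them.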

multiplier_val = 1 / len(apps)
if st.button("Analyze Document"):
    prg = st.progress(0.0)
    for i,func in enumerate(apps):
        func()
        prg.progress((i+1)*multiplier_val)

# If there is data stored
if 'key0' in st.session_state:
    with st.sidebar:
        topic = st.radio(
                        "Which category you want to explore?",
                        (['Vulnerability']))
    
    if topic == 'Vulnerability':

        # Assign dataframe a name
        df_vul = st.session_state['key0']
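        # 'key0' holds the processed paragraphs (one row per paragraph) as a pandas
        # DataFrame; the code below relies on its 'Vulnerability Label' column,
        # where 'Other' marks paragraphs without a detected reference.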

        col1, col2 = st.columns([1,1])

        with col1:
            # Header
            st.subheader("Explore references to vulnerable groups:")

            # Text 
            num_paragraphs = len(df_vul['Vulnerability Label'])
            num_references = len(df_vul[df_vul['Vulnerability Label'] != 'Other'])
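            # Note: this counts paragraphs carrying a non-'Other' label, not the
            # number of individual group mentions within each paragraph.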
            
            st.markdown(f"""<div style="text-align: justify;"> The document contains a
                    total of <span style="color: red;">{num_paragraphs}</span> paragraphs.
                    We identified <span style="color: red;">{num_references}</span>
                    references to vulnerable groups.</div>
                    <br>
                    In the pie chart on the right you can see the distribution of the different 
                    groups defined. For a more detailed view in the text, see the paragraphs and 
                    their respective labels in the table below.</div>""", unsafe_allow_html=True)
    
        with col2:
            ### Pie chart
                        
            # Create a df that stores all the labels
            df_labels = pd.DataFrame(list(label_dict.items()), columns=['Label ID', 'Label'])
    
            # Count how often each label appears in the "Vulnerability Label" column
            label_counts = df_vul['Vulnerability Label'].value_counts().reset_index()
            label_counts.columns = ['Label', 'Count']
    
            # Merge the label counts into the df_labels DataFrame
            df_labels = df_labels.merge(label_counts, on='Label', how='left')
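            # Labels that never occur in the document get NaN counts from this left
            # merge; a fillna(0) here would make them explicit zero-count rows.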
    
            # Configure graph
            fig = px.pie(df_labels,
                    names="Label", 
                    values="Count",
                    title='Label Counts',
                    hover_name="Count",
                    color_discrete_sequence=px.colors.qualitative.Plotly
            )
            
            # Show plot
            st.plotly_chart(fig, use_container_width=True)
    
        ### Table 
        st.table(df_vul[df_vul['Vulnerability Label'] != 'Other'])

       # vulnerability_analysis.vulnerability_display()
    # elif topic == 'Action':
    #     policyaction.action_display()
    # else: 
    #     policyaction.policy_display()
    #st.write(st.session_state.key0)