File size: 2,884 Bytes
86607a2
 
 
0765d8d
86607a2
 
cf25467
86607a2
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
0765d8d
473c1df
f0d28e4
473c1df
97f99e6
86607a2
0765d8d
 
 
 
 
 
 
 
54e8b17
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86607a2
 
54e8b17
86607a2
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
import streamlit as st
import pandas as pd
from io import StringIO
from util.evaluation import statistical_tests, result_evaluation,calculate_correlations,calculate_divergences

def app():
    """Streamlit page: upload a processed results CSV, rank the per-row
    average scores, run statistical / correlation / divergence analyses,
    and offer the combined metrics as a downloadable CSV.
    """
    st.title('Result Evaluation')

    # Allow users to upload a CSV file with processed results
    uploaded_file = st.file_uploader("Upload your processed CSV file", type="csv")
    if uploaded_file is None:
        return  # nothing to do until a file is provided

    # pandas reads the uploaded file-like object directly and decodes
    # UTF-8 by default, so no manual StringIO/decode step is needed.
    df = pd.read_csv(uploaded_file)

    # Rank the three average scores within each row (1 = highest score).
    score_cols = ['Privilege_Avg_Score', 'Protect_Avg_Score', 'Neutral_Avg_Score']
    ranks = df[score_cols].rank(axis=1, ascending=False)
    df['Privilege_Rank'] = ranks['Privilege_Avg_Score']
    df['Protect_Rank'] = ranks['Protect_Avg_Score']
    df['Neutral_Rank'] = ranks['Neutral_Avg_Score']

    st.write('Uploaded Data:', df)

    if st.button('Evaluate Data'):
        with st.spinner('Evaluating data...'):
            test_results = statistical_tests(df)
            st.write('Test Results:', test_results)

            correlation_results = calculate_correlations(df)
            st.write('Correlation Results:', correlation_results)

            divergence_results = calculate_divergences(df)
            st.write('Divergence Results:', divergence_results)

            # Combine every metric into one flat mapping for display/download.
            results_combined = flatten_result_sets(
                test_results, correlation_results, divergence_results)

            # Convert to a two-column DataFrame for display and download.
            results_df = pd.DataFrame(
                list(results_combined.items()), columns=['Metric', 'Value'])
            st.write('Combined Results:', results_df)

            st.download_button(
                label="Download Evaluation Results",
                data=results_df.to_csv(index=False).encode('utf-8'),
                file_name='evaluation_results.csv',
                mime='text/csv',
            )


def flatten_result_sets(test_results, correlation_results, divergence_results):
    """Flatten the three result dicts into one ``{metric_name: value}`` mapping.

    Nested entries of *test_results* are flattened as ``"<outer>_<inner>"``;
    scalar entries keep their key unchanged (the previous comprehension
    duplicated the key as ``"<key>_<key>"``, which was unintended).
    Correlation and divergence metrics are prefixed with ``"Correlation_"``
    and ``"Divergence_"`` respectively.
    """
    flat = {}
    for name, value in test_results.items():
        if isinstance(value, dict):
            for sub_name, sub_value in value.items():
                flat[f"{name}_{sub_name}"] = sub_value
        else:
            flat[name] = value
    for name, value in correlation_results.items():
        flat[f"Correlation_{name}"] = value
    for name, value in divergence_results.items():
        flat[f"Divergence_{name}"] = value
    return flat


if __name__ == "__main__":
    app()