Fralet committed on
Commit
ceeef94
·
verified ·
1 Parent(s): c08559d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +44 -1
app.py CHANGED
@@ -1,4 +1,4 @@
1
- import streamlit as st
2
  import pandas as pd
3
  from transformers import pipeline
4
  import re
@@ -66,3 +66,46 @@ predict_and_log(data_open, 'Predicted_M', 'processed_text_mopen', true_label_col
66
 
67
  # Optionally display a confirmation message
68
  st.write("Predictions have been logged. Check your logs for details.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """import streamlit as st
2
  import pandas as pd
3
  from transformers import pipeline
4
  import re
 
66
 
67
  # Optionally display a confirmation message
68
  st.write("Predictions have been logged. Check your logs for details.")
69
+ """
70
+ import pandas as pd
71
+ from transformers import pipeline
72
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support, confusion_matrix
73
+ import seaborn as sns
74
+ import matplotlib.pyplot as plt
75
+
76
# Load the previously generated predictions workbook into a DataFrame.
_source_path = "ResponseOpenPredicted.xlsx"
data = pd.read_excel(_source_path)
78
+
79
# Calculate metrics
def calculate_metrics(true_labels, predicted_labels):
    """Return (accuracy, precision, recall, f1) for the given label pairs.

    Precision, recall and F1 are averaged with class-support weighting,
    i.e. sklearn's ``average='weighted'`` mode.
    """
    acc = accuracy_score(true_labels, predicted_labels)
    prec, rec, f1, _support = precision_recall_fscore_support(
        true_labels, predicted_labels, average='weighted'
    )
    return acc, prec, rec, f1
84
+
85
+ accuracy_f, precision_f, recall_f, f1_score_f = calculate_metrics(data['True_label'], data['Predicted_F'])
86
+ accuracy_m, precision_m, recall_m, f1_score_m = calculate_metrics(data['True_label'], data['Predicted_M'])
87
+
88
# Confusion matrices visualization
conf_matrix_f = confusion_matrix(data['True_label'], data['Predicted_F'])
conf_matrix_m = confusion_matrix(data['True_label'], data['Predicted_M'])

fig, ax = plt.subplots(1, 2, figsize=(12, 6))
sns.heatmap(conf_matrix_f, annot=True, fmt="d", cmap="Blues", ax=ax[0])
ax[0].set_title('Confusion Matrix for Predicted_F')
sns.heatmap(conf_matrix_m, annot=True, fmt="d", cmap="Purples", ax=ax[1])
ax[1].set_title('Confusion Matrix for Predicted_M')
# FIX: `fig` is rebound by the distribution figure below, and the original
# script only called plt.tight_layout() on that second figure — this one
# could render with clipped/overlapping titles. Lay it out here, while
# `fig` still refers to the confusion-matrix figure.
fig.tight_layout()
97
+
98
# Distribution of prediction results
fig, ax = plt.subplots(1, 2, figsize=(12, 6))
_panels = (
    ('Predicted_F', 'blue', 'Distribution of Predictions for Female Inputs'),
    ('Predicted_M', 'purple', 'Distribution of Predictions for Male Inputs'),
)
# One bar chart per prediction column, drawn left-to-right.
for axis, (column, bar_color, title) in zip(ax, _panels):
    data[column].value_counts().plot(kind='bar', ax=axis, color=bar_color)
    axis.set_title(title)
    axis.set_xlabel('Predicted Labels')
    axis.set_ylabel('Frequency')

plt.tight_layout()
plt.show()