Update app.py
app.py CHANGED
@@ -39,28 +39,8 @@ if st.button("Analyser le texte"):
     result = classifier(text, candidate_labels, hypothesis_template=hypothesis_template)

     if result['labels'][0] == 1:
-        st.info(f"Résults
+        st.info(f"Results: Good comments, accuracy = {result['scores'][0]*100:.2f}%")
     if result['labels'][0] == 0:
-        st.info(f"
+        st.info(f"Results: Bad comments, accuracy = {result['scores'][0]*100:.2f}%")
     else:
         st.write("Text Analysis.")
-
-    # Compute the performance metrics (adjust these lines to your task)
-    inputs = df["text"].tolist()
-    true_labels = df["label"].tolist()
-    predictions = classifier(inputs, candidate_labels, hypothesis_template=hypothesis_template)
-    predicted_labels = [result['labels'][0] for result in predictions]
-
-    accuracy = accuracy_score(true_labels, predicted_labels)
-    precision = precision_score(true_labels, predicted_labels, average='binary')
-    recall = recall_score(true_labels, predicted_labels, average='binary')
-    f1 = f1_score(true_labels, predicted_labels, average='binary')
-    balanced_accuracy = balanced_accuracy_score(true_labels, predicted_labels)
-
-    # Display the metrics as a table
-    st.header("Evaluation of our models")
-    metrics_df = pd.DataFrame({
-        "Métrique": ["Accuracy", "Precision", "Recall", "F1-score", "Balanced Accuracy"],
-        "Valeur": [accuracy, precision, recall, f1, balanced_accuracy]
-    })
-    st.table(metrics_df)
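For context, here is a minimal sketch of the zero-shot call this hunk revolves around. The model name, candidate labels, and template below are illustrative assumptions; the Space's real values are defined elsewhere in app.py and are not part of this diff. One caveat worth noting: the transformers zero-shot pipeline returns its labels sorted by descending score, so result['labels'][0] is the winning candidate label itself. With string labels like the ones assumed here, the comparisons against 1 and 0 in the hunk above never match and the else branch always runs; they only work if candidate_labels are literally the integers 1 and 0.

# Hedged sketch: all names below are assumptions, not the Space's actual config.
from transformers import pipeline

classifier = pipeline("zero-shot-classification",
                      model="facebook/bart-large-mnli")  # assumed model choice

candidate_labels = ["good", "bad"]              # assumed labels
hypothesis_template = "This review is {}."      # assumed template

result = classifier("Great product, works perfectly!",
                    candidate_labels,
                    hypothesis_template=hypothesis_template)

# 'labels' and 'scores' are parallel lists, sorted by descending score, so
# index 0 is the best candidate label and its confidence.
print(result["labels"][0], f"{result['scores'][0]*100:.2f}%")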
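The evaluation block removed above also relied on scikit-learn and pandas imports that are not visible in this hunk. Below is a self-contained sketch of that block under stated assumptions: it reuses classifier, candidate_labels, and hypothesis_template from the sketch above, the df columns ("text", "label") follow the removed code, and the sample rows are made up. Note that average='binary' in the removed code assumes pos_label=1, which fails for string labels, so pos_label is set explicitly here.

import pandas as pd
import streamlit as st
from sklearn.metrics import (accuracy_score, balanced_accuracy_score,
                             f1_score, precision_score, recall_score)

# Placeholder frame standing in for whatever dataset the Space loads (assumption).
df = pd.DataFrame({"text": ["Great product, fast shipping", "Broke after one day"],
                   "label": ["good", "bad"]})

# Batch the whole column through the pipeline; it returns one result dict per text.
predictions = classifier(df["text"].tolist(), candidate_labels,
                         hypothesis_template=hypothesis_template)
predicted_labels = [p["labels"][0] for p in predictions]
true_labels = df["label"].tolist()

metrics_df = pd.DataFrame({
    "Metric": ["Accuracy", "Precision", "Recall", "F1-score", "Balanced Accuracy"],
    "Value": [
        accuracy_score(true_labels, predicted_labels),
        precision_score(true_labels, predicted_labels, pos_label="good"),
        recall_score(true_labels, predicted_labels, pos_label="good"),
        f1_score(true_labels, predicted_labels, pos_label="good"),
        balanced_accuracy_score(true_labels, predicted_labels),
    ],
})

st.header("Evaluation of our models")
st.table(metrics_df)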