John Graham Reynolds committed
Commit: ea607d4 · Parent(s): 40a8b4b

fix key error by adding metric map
app.py CHANGED
```diff
@@ -20,36 +20,44 @@ This Space shows how one can instantiate these custom `evaluate.Metric`s, each w
 HF `evaluate.CombinedEvaluations` object. From here, we can easily compute each of the metrics simultaneously using `compute`.</p>
 """
 
-article = """<p style='text-align: center'>Check out the [original repo](https://github.com/johngrahamreynolds/FixedMetricsForHF) housing this code, and a quickly \
-trained [multilabel text classification model](https://github.com/johngrahamreynolds/RoBERTa-base-DReiFT/tree/main) that makes use of it during evaluation.</p>"""
 
-def
+def populate_map(metric_df: pd.DataFrame, metric_set: set) -> dict:
+
+    metric_map = dict()
+
+    for key in metric_set:
+        for val in metric_df.loc[metric_df["metric"] == key]["average"]:
+            metric_map[key] = val
+
+    return metric_map
+
+
+def evaluation(predictions_df: pd.DataFrame, metrics_df: pd.DataFrame) -> str:
 
-    metric_set = set(
+    metric_set = set(metrics_df["Metric"].to_list())
+    metric_map = populate_map(metrics_df, metric_set)
     combined_list = []
 
     if "f1" in metric_set:
-        f1 = FixedF1(average=
+        f1 = FixedF1(average=metric_map["f1"])
         combined_list.append(f1)
     if "precision" in metric_set:
-        precision = FixedPrecision(average=
+        precision = FixedPrecision(average=metric_map["f1"])
         combined_list.append(precision)
     if "recall" in metric_set:
-        recall = FixedRecall(average=
+        recall = FixedRecall(average=metric_map["f1"])
         combined_list.append(recall)
 
     combined = evaluate.combine(combined_list)
 
-    predicted = [int(num) for num in
-    references = [int(num) for num in
+    predicted = [int(num) for num in predictions_df["Predicted Class Label"].to_list()]
+    references = [int(num) for num in predictions_df["Actual Class Label"].to_list()]
 
     combined.add_batch(predictions=predicted, references=references)
     outputs = combined.compute()
 
     return f"Your metrics are as follows: \n {outputs}"
 
-def filter_records(records, gender):
-    return records[records["gender"] == gender]
 
 space = gr.Interface(
     fn=evaluation,
@@ -72,7 +80,6 @@ space = gr.Interface(
     outputs="text",
     title=title,
     description=description,
-    article=article,
     examples=[
         [
             pd.DataFrame(columns=["Predicted Class Label", "Actual Class Label"], data=[[0,1],[1,1],[2,2],[1,0],[0,0]]),
```
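The metric map referenced in the commit message is just a dict from metric name to its requested averaging strategy. A minimal sketch of what `populate_map` produces, using an illustrative metrics table (the "metric" and "average" column names mirror the lookup in the diff; the averaging values are arbitrary examples):

```python
import pandas as pd

# Illustrative metrics table; column names follow the populate_map lookup above.
metrics_df = pd.DataFrame(
    {"metric": ["f1", "precision", "recall"], "average": ["weighted", "micro", "macro"]}
)

metric_set = set(metrics_df["metric"].to_list())

# Same loop as populate_map: for each requested metric, pick up its averaging value.
metric_map = {}
for key in metric_set:
    for val in metrics_df.loc[metrics_df["metric"] == key]["average"]:
        metric_map[key] = val

print(metric_map)  # {'f1': 'weighted', 'precision': 'micro', 'recall': 'macro'} (key order may vary)
```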
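For reference, a self-contained sketch of the flow the rewritten `evaluation` function implements, run outside Gradio. The import path for the `Fixed*` metrics and the per-metric `average` values are assumptions, and each metric is keyed to its own `metric_map` entry here; the rest follows the diff:

```python
import evaluate
import pandas as pd

# Assumed import path: the custom metrics come from the FixedMetricsForHF code
# that app.py already imports.
from fixed_f1 import FixedF1
from fixed_precision import FixedPrecision
from fixed_recall import FixedRecall

# Example predictions, mirroring the example DataFrame wired into the Space.
predictions_df = pd.DataFrame(
    columns=["Predicted Class Label", "Actual Class Label"],
    data=[[0, 1], [1, 1], [2, 2], [1, 0], [0, 0]],
)

# Hypothetical output of populate_map; in the Space this is built from the
# metrics DataFrame the user fills in.
metric_map = {"f1": "weighted", "precision": "micro", "recall": "macro"}

# Instantiate each metric with its own averaging strategy ...
combined_list = [
    FixedF1(average=metric_map["f1"]),
    FixedPrecision(average=metric_map["precision"]),
    FixedRecall(average=metric_map["recall"]),
]

# ... bundle them into a single CombinedEvaluations object ...
combined = evaluate.combine(combined_list)

# ... and score the whole batch with one compute() call.
predicted = [int(num) for num in predictions_df["Predicted Class Label"].to_list()]
references = [int(num) for num in predictions_df["Actual Class Label"].to_list()]

combined.add_batch(predictions=predicted, references=references)
print(combined.compute())
```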
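The `gr.Interface` input components themselves sit outside the changed lines above; a plausible wiring for the two-DataFrame signature of `evaluation` might look like the following, with the `gr.Dataframe` headers, labels, title, description, and example metrics table all illustrative guesses rather than the Space's actual configuration:

```python
import gradio as gr
import pandas as pd

from app import evaluation  # hypothetical import of the function shown in the diff

# Hypothetical input components matching evaluation(predictions_df, metrics_df).
space = gr.Interface(
    fn=evaluation,
    inputs=[
        gr.Dataframe(
            headers=["Predicted Class Label", "Actual Class Label"],
            datatype=["number", "number"],
            label="Predicted vs. actual class labels",
        ),
        gr.Dataframe(
            headers=["Metric", "average"],
            datatype=["str", "str"],
            label="Metrics to compute and their averaging strategies",
        ),
    ],
    outputs="text",
    title="Combined classification metrics",  # placeholder; app.py defines its own title
    description="Compute F1, precision, and recall together.",  # placeholder description
    examples=[
        [
            pd.DataFrame(
                columns=["Predicted Class Label", "Actual Class Label"],
                data=[[0, 1], [1, 1], [2, 2], [1, 0], [0, 0]],
            ),
            pd.DataFrame(
                columns=["Metric", "average"],
                data=[["f1", "weighted"], ["precision", "micro"], ["recall", "macro"]],
            ),
        ]
    ],
)

if __name__ == "__main__":
    space.launch()
```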