|
from fixed_f1 import FixedF1
from fixed_precision import FixedPrecision
from fixed_recall import FixedRecall

import gradio as gr
|
|
|
title = "'Combine' multiple metrics with this 🤗 Evaluate 🪲 Fix!"
|
|
|
description = """<p style='text-align: center'> |
|
As I introduce myself to the entirety of the π€ ecosystem, I've put together this space to show off a workaround for a current πͺ² in the π€ Evaluate library. \n |
|
|
|
\tCheck out the original, longstanding issue [here](https://github.com/huggingface/evaluate/issues/234). This details how it is currently impossible to \ |
|
'evaluate.combine()' multiple metrics related to multilabel text classification. Particularly, one cannot 'combine()' the f1, precision, and recall scores for \ |
|
evaluation. I encountered this issue specifically while training [RoBERTa-base-DReiFT](https://huggingface.co/MarioBarbeque/RoBERTa-base-DReiFT) for multilabel \ |
|
text classification of 805 labeled medical conditions based on drug reviews for treatment received for the same underlying conditio. Use the space below for \ |
|
a preview of the workaround! </p> |
|
|
|
|
|
""" |
|
|
|
article = "<p style='text-align: center'>Check out the [original repo](https://github.com/johngrahamreynolds/FixedMetricsForHF) housing this code, and a quickly \ |
|
trained [multilabel text classicifcation model](https://github.com/johngrahamreynolds/RoBERTa-base-DReiFT/tree/main) that makes use of it during evaluation.</p>" |
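
# A minimal sketch of the workaround in action. Assumptions: the Fixed* classes
# accept their `average` kwarg at construction time (the crux of the fix, since
# `evaluate.combine()` cannot forward per-metric kwargs at compute time) and
# otherwise follow the standard `evaluate` metric API; the predictions and
# references below are dummy values for illustration only.
import evaluate

f1 = FixedF1(average="weighted")
precision = FixedPrecision(average="micro")
recall = FixedRecall(average="macro")

combined = evaluate.combine([f1, precision, recall])
combined.add_batch(predictions=[0, 2, 1, 1], references=[0, 1, 2, 1])
print(combined.compute())  # e.g. {"f1": ..., "precision": ..., "recall": ...}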
|
|
|
def show_off(user_input):
    # Instantiate each fixed metric: the constructors bind the kwargs that
    # `evaluate.combine()` cannot pass along at compute time, so the three
    # can be combined and computed together (see the sketch above).
    f1 = FixedF1()
    precision = FixedPrecision()
    recall = FixedRecall()

    return f"Checking this out! Here's what you put in: {user_input}"
|
|
|
|
|
gr.Interface(
    fn=show_off,
    inputs="textbox",
    outputs="text",
    title=title,
    description=description,
    article=article,
    examples=[["What are you doing?"], ["Where should we time travel to?"]],
).launch()