# Spaces: Sleeping — Hugging Face Space status banner captured by the page
# scrape; kept as a comment so it is not mistaken for code.
import gradio as gr
from datasets import load_dataset
from evaluate import evaluator
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
# Load the shared evaluation split once; both models are scored on it.
data = load_dataset("huolongguo10/check_sec_eval", split="test")
task_evaluator = evaluator("text-classification")

# Base checkpoint of the check_sec classifier.
model = AutoModelForSequenceClassification.from_pretrained("huolongguo10/check_sec")
tokenizer = AutoTokenizer.from_pretrained("huolongguo10/check_sec")

# Smaller "tiny" variant of the same classifier, loaded for comparison.
model_tiny = AutoModelForSequenceClassification.from_pretrained("huolongguo10/check_sec_tiny")
tokenizer_tiny = AutoTokenizer.from_pretrained("huolongguo10/check_sec_tiny")
# Evaluate the base model on the test split. Inputs are read from the
# dataset's "sentence1" column; the pipeline's string labels LABEL_0/LABEL_1
# are mapped back to the dataset's integer labels 0/1 so metrics line up.
eval_results = task_evaluator.compute(
    model_or_pipeline=model,
    data=data,
    input_column="sentence1",
    label_mapping={"LABEL_0": 0, "LABEL_1": 1},
    tokenizer=tokenizer,
)
# Same evaluation for the tiny variant, using its own tokenizer so the
# two result dicts are directly comparable.
eval_results_tiny = task_evaluator.compute(
    model_or_pipeline=model_tiny,
    data=data,
    input_column="sentence1",
    label_mapping={"LABEL_0": 0, "LABEL_1": 1},
    tokenizer=tokenizer_tiny,
)
# Static results page: render both evaluation reports as JSON side by side.
with gr.Blocks() as demo:
    gr.Markdown('# Base:')
    gr.JSON(eval_results)
    gr.Markdown('# Tiny:')
    gr.JSON(eval_results_tiny)
    # Echo the base results to the Space logs for quick inspection.
    print(eval_results)
demo.launch()