# Evaluate the huolongguo10/check_sec and check_sec_tiny classifiers on the
# check_sec_eval test split and display both metric reports in a Gradio page.
from datasets import load_dataset
from evaluate import evaluator
from transformers import AutoModelForSequenceClassification, pipeline, AutoTokenizer
import gradio as gr
# Load the shared evaluation split and the metric driver once.
data = load_dataset("huolongguo10/check_sec_eval", split="test")
task_evaluator = evaluator("text-classification")


def _load_checkpoint(repo_id):
    """Fetch a sequence-classification model and its tokenizer from the Hub."""
    return (
        AutoModelForSequenceClassification.from_pretrained(repo_id),
        AutoTokenizer.from_pretrained(repo_id),
    )


model, tokenizer = _load_checkpoint("huolongguo10/check_sec")
model_tiny, tokenizer_tiny = _load_checkpoint("huolongguo10/check_sec_tiny")


def _score(mdl, tok):
    """Score one checkpoint on the test split with the binary label mapping."""
    return task_evaluator.compute(
        model_or_pipeline=mdl,
        data=data,
        input_column="sentence1",
        label_mapping={"LABEL_0": 0, "LABEL_1": 1},
        tokenizer=tok,
    )


eval_results = _score(model, tokenizer)
eval_results_tiny = _score(model_tiny, tokenizer_tiny)
# Render both evaluation reports in a simple two-section Gradio page.
# Fix: the original last line ended with a stray " |" (scrape artifact),
# which is a syntax error — removed.
with gr.Blocks() as demo:
    gr.Markdown('# Base:')
    gr.JSON(eval_results)
    gr.Markdown('# Tiny:')
    gr.JSON(eval_results_tiny)
print(eval_results)
demo.launch()