# mformer / app.py
# Author: Josh Nguyen
# "Add HF verification for private models" (commit 126c386)
import torch
import gradio as gr
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
)
from typing import Dict
import os
from huggingface_hub import login
login(token=os.getenv("HUGGINGFACE_TOKEN"))
FOUNDATIONS = ["authority", "care", "fairness", "loyalty", "sanctity"]
tokenizer = AutoTokenizer.from_pretrained(
"joshnguyen/mformer-authority",
use_auth_token=True
)
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MODELS = {}
for foundation in FOUNDATIONS:
model = AutoModelForSequenceClassification.from_pretrained(
pretrained_model_name_or_path=f"joshnguyen/mformer-{foundation}",
use_auth_token=True
)
MODELS[foundation] = model.to(DEVICE)
def classify_text(text: str) -> Dict[str, float]:
    """Score *text* against each of the five moral foundations.

    Args:
        text: Input text to classify.

    Returns:
        Mapping from foundation name (see ``FOUNDATIONS``) to the
        positive-class probability in [0, 1].
    """
    # Encode the prompt once; the same encoding is fed to every model.
    inputs = tokenizer(
        [text],
        padding=True,
        truncation=True,
        return_tensors="pt",
    ).to(DEVICE)
    scores = {}
    # Inference only: disable autograd so no graph is built per request,
    # which saves memory and time under load.
    with torch.no_grad():
        for foundation in FOUNDATIONS:
            outputs = MODELS[foundation](**inputs)
            probs = torch.softmax(outputs.logits, dim=1)
            # Column 1 is the positive class; convert to a plain float
            # to honor the Dict[str, float] contract.
            scores[foundation] = float(probs[0, 1].cpu())
    return scores
# Build the Gradio UI: one free-text input, one label output showing the
# per-foundation scores.
demo = gr.Interface(
    fn=classify_text,
    inputs=[
        # Prompt
        gr.Textbox(
            label="Input text",
            container=False,
            show_label=True,
            placeholder="Enter some text...",
            lines=10,
            scale=10,
        ),
    ],
    outputs=[
        # NOTE: gr.Label does not accept `lines` (that is a Textbox-only
        # parameter); passing it raises TypeError on current Gradio.
        gr.Label(
            label="Moral foundations scores",
            container=False,
            show_label=True,
            scale=10,
        )
    ],
)
# Queue requests (max 20 waiting) so concurrent users don't overload the
# single model instance.
demo.queue(max_size=20).launch()