import torch
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import plotly.graph_objects as go
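
# Text box for the item (statement) whose desirability will be estimated.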
input_text = st.text_input(
    label='Estimate item desirability:',
    value='I love a good fight.',
    placeholder='Enter item'
)
#model_path = '/nlp/nlp/models/finetuned/twitter-xlm-roberta-base-regressive-desirability-ft-4'
model_path = 'magnolia-psychometrics/item-desirability'
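
# Load the tokenizer and the fine-tuned scoring model from the Hugging Face Hub.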
tokenizer = AutoTokenizer.from_pretrained(
    pretrained_model_name_or_path=model_path,
    use_fast=True
)

# num_labels=1 gives a single-score (regression-style) head; ignore_mismatched_sizes
# permits loading the checkpoint even if the stored head has a different shape.
model = AutoModelForSequenceClassification.from_pretrained(
    pretrained_model_name_or_path=model_path,
    num_labels=1,
    ignore_mismatched_sizes=True,
)
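
# Standardize a raw model score (the default mean/SD presumably describe the
# distribution of raw scores used for calibration).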
def z_score(y, mean=.04853076, sd=.9409466):
    return (y - mean) / sd

if input_text:
    # Tokenize the item and score it with a single forward pass (no gradients needed).
    inputs = tokenizer(input_text, padding=True, return_tensors='pt')

    with torch.no_grad():
        score = model(**inputs).logits.squeeze().tolist()
        z = z_score(score)
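
    # Gauge chart: the threshold needle marks the item's standardized desirability.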
    fig = go.Figure(go.Indicator(
        mode="gauge+delta",
        value=z,
        domain={'x': [0, 1], 'y': [0, 1]},
        title=f"Item Desirability <br><sup>\"{input_text}\"</sup>",
        delta={
            'reference': 0,
            'decreasing': {'color': "#ec4899"},
            'increasing': {'color': "#36def1"}
        },
        gauge={
            'axis': {'range': [-4, 4], 'tickwidth': 1, 'tickcolor': "black"},
            'bar': {'color': "#4361ee"},
            'bgcolor': "white",
            'borderwidth': 2,
            'bordercolor': "#efefef",
            'steps': [
                {'range': [-4, 0], 'color': '#efefef'},
                {'range': [0, 4], 'color': '#efefef'}],
            'threshold': {
                'line': {'color': "#4361ee", 'width': 8},
                'thickness': 0.75,
                'value': z}
        }))

    fig.update_layout(
        paper_bgcolor="white",
        font={'color': "black", 'family': "Arial"})

    st.plotly_chart(fig, theme=None, use_container_width=True)