import streamlit as st
import torch  # only used by the commented-out manual inference path below
# from transformers import AutoTokenizer, AutoModelForSequenceClassification, AutoConfig
from transformers import pipeline
model_name = "peace4ever/roberta-large-finetuned-mongolian_v3"
sentiment_pipeline = pipeline(task="sentiment-analysis", model=model_name)  # load the pre-trained sentiment pipeline
st.title("Эерэг? Сөрөг эсвэл аль нь ч биш?")  # "Positive? Negative, or neither?"
text = st.text_area("Өгүүлбэр оруулна уу?")  # "Please enter a sentence"
if text:
    predictions = sentiment_pipeline(text)

    col1, col2 = st.columns(2)
    col1.header("Sentiment")
    col2.header("Probability")

    # The pipeline returns a list with one dict per input text,
    # e.g. [{"label": "...", "score": 0.97}]
    for prediction in predictions:
        label = prediction["label"]
        probability = prediction["score"]
        # Map the model's raw label names to human-readable sentiments
        if label == "entailment":
            sentiment = "Negative"
        elif label == "contradiction":
            sentiment = "Neutral"
        else:  # "neutral"
            sentiment = "Positive"
        col1.write(sentiment)
        col2.write(f"{probability:.2f}")
# label = predictions[0]["label"]
# probability = predictions[0]["score"]
# col1.write(label)
# col2.write(f"{probability:.2f}")
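# Earlier manual approach (kept for reference): load the tokenizer and model directly,
# run a forward pass, and read the predicted label from the argmax of the logits.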
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForSequenceClassification.from_pretrained(model_name)
#
# encoded_input = tokenizer(text, return_tensors="pt")
# output = model(**encoded_input)
# label_map = {"positive": 0, "negative": 1, "neutral": 2}
# # Update the model configuration with custom labels
# config = AutoConfig.from_pretrained(model_name)
# config.label2id = {"positive": 0, "negative": 1, "neutral": 2}
# config.id2label = {0: "positive", 1: "negative", 2: "neutral"}
# config.save_pretrained(model_name)
# predicted_label_id = torch.argmax(output.logits, dim=1).item()
# id2label = model.config.id2label
# predicted_label = id2label[predicted_label_id]
# print("Predicted Class:", predicted_label)
# st.json(predicted_label)