import streamlit as st
import plotly.express as px
import torch

from torch import nn
from transformers import AutoTokenizer, AutoModelForSequenceClassification

defaultTxt = "I hate you cancerous insects so much"
txt = st.text_area('Text to analyze', defaultTxt)

# load tokenizer and model weights
tokenizer = AutoTokenizer.from_pretrained("s-nlp/roberta_toxicity_classifier")
model = AutoModelForSequenceClassification.from_pretrained("s-nlp/roberta_toxicity_classifier")
batch = tokenizer.encode(txt, return_tensors='pt')

# run encoding through model to get classification output
# e.g. "logits": tensor([[ 4.8982, -5.1952]], grad_fn=<AddmmBackward0>)
result = model(batch)

# apply softmax to the logits to get probabilities
# e.g. tensor([[9.9996e-01, 4.2627e-05]], grad_fn=<SoftmaxBackward0>)
# first index is neutral, second is toxic
prediction = nn.functional.softmax(result.logits, dim=-1)
neutralProb = prediction[0][0].item()
toxicProb = prediction[0][1].item()

# default text input ought to return:
# Neutral: 0.0052
# Toxic: 0.9948
st.write("Classification Probabilities")
st.write(f"{neutralProb:4.4} - NEUTRAL")
st.write(f"{toxicProb:4.4} - TOXIC")