Matt C committed on
Commit e104571
1 Parent(s): 6a356e0
Files changed (1)
  1. app.py +8 -5
app.py CHANGED
@@ -13,13 +13,16 @@ tokenizer = AutoTokenizer.from_pretrained("s-nlp/roberta_toxicity_classifier")
  model = AutoModelForSequenceClassification.from_pretrained("s-nlp/roberta_toxicity_classifier")
  batch = tokenizer.encode(txt, return_tensors='pt')
 
- # e.g. "logits":"tensor([[ 4.8982, -5.1952]], grad_fn=<AddmmBackward0>)"
+ # run model e.g. "logits": tensor([[ 4.8982, -5.1952]], grad_fn=<AddmmBackward0>)
  result = model(batch)
 
- # get probabilities
+ # get probabilities e.g. tensor([[9.9996e-01, 4.2627e-05]], grad_fn=<SoftmaxBackward0>)
+ # first indice is neutral, second is toxic
  prediction = nn.functional.softmax(result.logits, dim=-1)
 
- prediction
+ neutralProb = round(prediction[0][0], 4)
+ toxicProb = round(prediction[0][1], 4)
 
- #fig = px.bar(result, x="", y="", orientation='h')
- #fig.show()
+ print("Classification Probabilities")
+ print(f"Neutral: {neutralProb}")
+ print(f"Toxic: {toxicProb}")