Matt C committed on
Commit 33ec467
1 Parent(s): 79c7e0d
Files changed (1)
  1. app.py +8 -11
app.py CHANGED
@@ -5,31 +5,28 @@ import torch
 from torch import nn
 from transformers import AutoTokenizer, AutoModelForSequenceClassification
 
-deftxt = "I hate you cancerous insects so much"
-txt = st.text_area('Text to analyze', deftxt)
+defaultTxt = "I hate you cancerous insects so much"
+txt = st.text_area('Text to analyze', defaultTxt)
 
 # load tokenizer and model weights
 tokenizer = AutoTokenizer.from_pretrained("s-nlp/roberta_toxicity_classifier")
 model = AutoModelForSequenceClassification.from_pretrained("s-nlp/roberta_toxicity_classifier")
 batch = tokenizer.encode(txt, return_tensors='pt')
 
-# run model e.g. "logits": tensor([[ 4.8982, -5.1952]], grad_fn=<AddmmBackward0>)
+# run encoding through model to get classification output
+# e.g. "logits": tensor([[ 4.8982, -5.1952]], grad_fn=<AddmmBackward0>)
 result = model(batch)
 
-# get probabilities e.g. tensor([[9.9996e-01, 4.2627e-05]], grad_fn=<SoftmaxBackward0>)
+# transform logit to get probabilities
+# e.g. tensor([[9.9996e-01, 4.2627e-05]], grad_fn=<SoftmaxBackward0>)
 # first indice is neutral, second is toxic
 prediction = nn.functional.softmax(result.logits, dim=-1)
-
 neutralProb = prediction.data[0][0]
 toxicProb = prediction.data[0][1]
 
-neutralProb = torch.round(neutralProb, decimals=4)
-toxicProb = torch.round(toxicProb, decimals=4)
-
 # default text input ought to return:
 # Neutral: 0.0052
 # Toxic: 0.9948
-
 st.write("Classification Probabilities")
-st.write(f"{neutralProb:.4} - NEUTRAL")
-st.write(f"{toxicProb:.4} - TOXIC")
+st.write(f"{neutralProb:.4} - NEUTRAL")
+st.write(f"{toxicProb:.4} - TOXIC")
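For reference, a minimal sketch (not part of the commit) that reproduces the softmax step on the example logits quoted in the new comments, assuming only the torch and nn imports already used by app.py; the tensor values below are copied from those comments, so the printed probabilities match them only up to display rounding.

import torch
from torch import nn

# Example logits copied from the comment added in this commit
# ("logits": tensor([[ 4.8982, -5.1952]], ...)); index 0 = neutral, index 1 = toxic.
logits = torch.tensor([[4.8982, -5.1952]])

# Same transformation app.py applies to result.logits
prediction = nn.functional.softmax(logits, dim=-1)

neutralProb = prediction[0][0].item()
toxicProb = prediction[0][1].item()

# Mirrors the st.write formatting (:.4 = four significant digits)
print(f"{neutralProb:.4} - NEUTRAL")  # prints 1.0
print(f"{toxicProb:.4} - TOXIC")      # on the order of 4e-05, consistent with the 4.2627e-05 in the comment

Note also that the commit drops the torch.round(..., decimals=4) calls in favour of the :.4 format spec inside st.write, which rounds to four significant digits only at display time and leaves the probability tensors themselves untouched.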