nppmatt committed
Commit
36ce82a
1 Parent(s): 53a5efc

hopefully final commit

Files changed (1): app.py (+4, -6)
app.py CHANGED
@@ -41,7 +41,6 @@ model = AutoModelForSequenceClassification.from_pretrained(modelPath)
 # RoBERTA: [0]: neutral, [1]: toxic
 encoding = tokenizer.encode(txt, return_tensors='pt')
 result = model(encoding)
-result
 
 # transform logit to get probabilities
 if (result.logits.size(dim=1) < 2):
@@ -49,13 +48,12 @@ if (result.logits.size(dim=1) < 2):
     result.logits = nn.functional.pad(result.logits, pad, "constant", 0)
 st.write(result)
 prediction = nn.functional.softmax(result.logits, dim=-1)
-prediction
-#neutralProb = prediction.data[0][neutralIndex]
-#toxicProb = prediction.data[0][toxicIndex]
+neutralProb = prediction.data[0][neutralIndex]
+toxicProb = prediction.data[0][toxicIndex]
 
 # Expected returns from RoBERTa on default text:
 # Neutral: 0.0052
 # Toxic: 0.9948
 st.write("Classification Probabilities")
-#st.write(f"{neutralProb:.4f} - NEUTRAL")
-#st.write(f"{toxicProb:.4f} - TOXIC")
+st.write(f"{neutralProb:.4f} - NEUTRAL")
+st.write(f"{toxicProb:.4f} - TOXIC")