nppmatt committed
Commit 1345311
1 Parent(s): 36ce82a
Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -12,7 +12,6 @@ st.button("Submit Text")
 
 # Load tokenizer and model weights, try to default to RoBERTa.
 # Huggingface does not support Python 3.10 match statements and I'm too lazy to implement an equivalent.
-
 if (option == "RoBERTa"):
     tokenizerPath = "s-nlp/roberta_toxicity_classifier"
     modelPath = "s-nlp/roberta_toxicity_classifier"
@@ -37,16 +36,15 @@ else:
 tokenizer = AutoTokenizer.from_pretrained(tokenizerPath)
 model = AutoModelForSequenceClassification.from_pretrained(modelPath)
 
-# run encoding through model to get classification output
+# Run encoding through model to get classification output.
 # RoBERTA: [0]: neutral, [1]: toxic
 encoding = tokenizer.encode(txt, return_tensors='pt')
 result = model(encoding)
 
-# transform logit to get probabilities
+# Transform logit to get probabilities.
 if (result.logits.size(dim=1) < 2):
     pad = (0, 1)
     result.logits = nn.functional.pad(result.logits, pad, "constant", 0)
-st.write(result)
 prediction = nn.functional.softmax(result.logits, dim=-1)
 neutralProb = prediction.data[0][neutralIndex]
 toxicProb = prediction.data[0][toxicIndex]
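The first hunk sits under the comment about Python 3.10 match statements: the Space's runtime predates 3.10, so app.py dispatches on option with an if/elif chain instead. As a hedged sketch of the equivalent that comment alludes to (not part of the commit), a dict lookup does the same dispatch on older runtimes; only the RoBERTa model ID below comes from the diff, everything else is assumed:

# Sketch only: dict-based dispatch in place of a match statement.
# "s-nlp/roberta_toxicity_classifier" is taken from the diff; the
# fallback mirrors the "try to default to RoBERTa" comment in app.py.
MODEL_PATHS = {
    "RoBERTa": "s-nlp/roberta_toxicity_classifier",
    # ...the other options handled in app.py would be listed here...
}

def resolve_paths(option):
    path = MODEL_PATHS.get(option, MODEL_PATHS["RoBERTa"])
    return path, path  # (tokenizerPath, modelPath)

A lookup table also keeps the default in one place, which is why it is a common stand-in for match on pre-3.10 interpreters.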
 
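The second hunk's code pads a single-logit output out to two classes before applying softmax, so the later indexing into [neutral, toxic] never goes out of bounds. A minimal, self-contained sketch of just that transform, with a made-up logit value for illustration:

import torch
import torch.nn as nn

logits = torch.tensor([[2.0]])  # pretend the model emitted a single logit
if logits.size(dim=1) < 2:
    # Append a constant-0 logit at the toxic index so softmax
    # always yields a [neutral, toxic] pair.
    logits = nn.functional.pad(logits, (0, 1), "constant", 0)

probs = nn.functional.softmax(logits, dim=-1)
print(probs)  # tensor([[0.8808, 0.1192]]) -> ~88% neutral, ~12% toxic

Padding with a 0 logit assigns the missing class the baseline weight exp(0) = 1 before normalization, which is why the two printed probabilities still sum to 1.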