DarwinAnim8or committed
Commit 5841603
1 Parent(s): ecae947
Create app.py
app.py ADDED
@@ -0,0 +1,41 @@
+# Import gradio and transformers libraries
+import gradio as gr
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+# Load the small deberta models for hate and offensive speech detection
+hate_model = AutoModelForSequenceClassification.from_pretrained("KoalaAI/HateSpeechDetector")
+hate_tokenizer = AutoTokenizer.from_pretrained("KoalaAI/HateSpeechDetector")
+
+offensive_model = AutoModelForSequenceClassification.from_pretrained("KoalaAI/OffensiveSpeechDetector")
+offensive_tokenizer = AutoTokenizer.from_pretrained("KoalaAI/OffensiveSpeechDetector")
+
+# Define a function that takes an input text and returns the scores from the models
+def get_scores(text):
+    # Tokenize and encode the input text
+    hate_input = hate_tokenizer(text, return_tensors="pt")
+    offensive_input = offensive_tokenizer(text, return_tensors="pt")
+
+    # Get the logits from the models
+    hate_logits = hate_model(**hate_input).logits
+    offensive_logits = offensive_model(**offensive_input).logits
+
+    # Apply softmax to get probabilities
+    hate_probs = hate_logits.softmax(dim=1)
+    offensive_probs = offensive_logits.softmax(dim=1)
+
+    # Get the labels from the models
+    hate_labels = hate_model.config.id2label
+    offensive_labels = offensive_model.config.id2label
+
+    # Format the output as a dictionary of scores
+    output = {}
+    output["Hate speech"] = {hate_labels[i]: round(p.item(), 4) for i, p in enumerate(hate_probs[0])}
+    output["Offensive speech"] = {offensive_labels[i]: round(p.item(), 4) for i, p in enumerate(offensive_probs[0])}
+
+    return output
+
+# Create a gradio interface with a text input and a json output
+iface = gr.Interface(fn=get_scores, inputs="text", outputs="json")
+
+# Launch the interface
+iface.launch()
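A quick local sanity check of the scoring function, before launching the interface, might look like the sketch below. The example sentence is arbitrary, and the label keys inside each sub-dictionary come from each model's id2label mapping, which is not shown in this commit.

# Hypothetical smoke test; run before iface.launch() or in a separate script.
# Prints the per-label probabilities produced by both models for one input.
scores = get_scores("This is a test sentence.")
print(scores["Hate speech"])
print(scores["Offensive speech"])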