TLeonidas committed on
Commit 0436d22
1 Parent(s): 7d53514

Update app.py

Files changed (1):
  1. app.py +26 -21
app.py CHANGED
@@ -1,27 +1,32 @@
  import gradio as gr
  import joblib
 
- # Function to load your model (adjust the path and method if needed)
- def load_model():
-     # This path is relative to the root of your Hugging Face Space
-     model_path = "./en-hate-speech-detection-3label"
-     model = joblib.load(model_path)
-     return model
-
- # Function to predict hate speech from text input
- def predict_hate_speech(text):
-     model = load_model()  # Load your model
-     prediction = model.predict([text])
-     # Assuming your model outputs integers representing classes, you might want to convert
-     # these to more readable labels. Adjust these labels according to your model's output.
-     labels = {0: 'Neutral or Ambiguous', 1: 'Not Hate', 2: 'Offensive or Hate Speech'}
-     return labels[prediction[0]]
-
- # Adjusted Gradio interface to take text input and output model predictions
- iface = gr.Interface(fn=predict_hate_speech,
-                      inputs=gr.Textbox(lines=2, placeholder="Enter Text Here..."),
-                      outputs="text",
-                      description="Detects hate speech in text. Outputs 'Neutral or Ambiguous', 'Not Hate', 'Offensive or Hate Speech'.")
+ # Load your serialized objects
+ model = joblib.load('random_forest_model_3labels2.joblib')
+ encoder = joblib.load('label_encoder2.joblib')
+ vectorizer = joblib.load('count_vectorizer2.joblib')
+
+ def predict(input_text):
+     # Preprocess the input with your vectorizer and encoder as needed
+     # For example, if your model expects vectorized input:
+     vectorized_text = vectorizer.transform([input_text])
+
+     # Make a prediction
+     prediction = model.predict(vectorized_text)
+
+     # If your model's output needs to be decoded (optional)
+     # decoded_prediction = encoder.inverse_transform(prediction)
+
+     # Return the prediction (you might want to convert it into a more readable form)
+     return prediction[0]  # Modify this according to your needs
+
+ # Setup the Gradio interface
+ iface = gr.Interface(fn=predict,
+                      inputs=gr.Textbox(lines=2, placeholder="Enter Text Here..."),
+                      outputs="text",
+                      description="Your model description here.")
+
+ # Launch the app
  iface.launch()
 
 
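Note that the new predict() returns the raw class index from model.predict() and leaves the label_encoder2.joblib decoding step commented out, so the app displays a number rather than a label. A minimal sketch of how that decoding could be wired in, assuming the LabelEncoder was fitted on human-readable string labels (the exact label names depend on how the encoder was trained and are not shown in this commit):

    def predict(input_text):
        # Vectorize the raw text with the same CountVectorizer used during training
        vectorized_text = vectorizer.transform([input_text])

        # Predict the encoded class index
        prediction = model.predict(vectorized_text)

        # Map the index back to the original string label via the fitted LabelEncoder
        return encoder.inverse_transform(prediction)[0]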
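For context, the three .joblib files loaded at the top of the new app.py would typically be produced by a training script along these lines. This is an illustrative sketch only: the training file, column names, and hyperparameters are assumptions and are not part of this repository.

    import joblib
    import pandas as pd
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.preprocessing import LabelEncoder

    # Hypothetical training data with a 'text' column and a string 'label' column
    df = pd.read_csv('train.csv')

    # Fit the vectorizer on the raw text and the encoder on the string labels
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(df['text'])

    encoder = LabelEncoder()
    y = encoder.fit_transform(df['label'])

    # Train the classifier on the vectorized text and encoded labels
    model = RandomForestClassifier()
    model.fit(X, y)

    # Serialize the fitted objects so app.py can load them at startup
    joblib.dump(model, 'random_forest_model_3labels2.joblib')
    joblib.dump(encoder, 'label_encoder2.joblib')
    joblib.dump(vectorizer, 'count_vectorizer2.joblib')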