shrirangphadke committed on
Commit dccce3b
1 Parent(s): db866d2

Update app.py

Files changed (1)
  1. app.py +59 -6
app.py CHANGED
@@ -1,12 +1,66 @@
 import gradio as gr
 import os
+import torch
 
-def get_hatespeech_score(text):
-    io = gr.Interface.load("unhcr/hatespeech-detection")
-    return io(text)
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+
+# Install necessary libraries
+!pip install transformers
+!pip install torch
+!pip install vaderSentiment
+
+import torch
+from transformers import RobertaTokenizer, RobertaForSequenceClassification
+from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
+
+# Load pre-trained RoBERTa model and tokenizer
+tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
+model = RobertaForSequenceClassification.from_pretrained('roberta-base')
+
+# Define a function to analyze text for potential adult content
+def analyze_adult_content(text):
+    # Tokenize input text
+    inputs = tokenizer(text, return_tensors='pt')
+
+    # Perform inference
+    outputs = model(**inputs)
+
+    # Get predicted label (0: Not Adult Content, 1: Adult Content)
+    predicted_label_idx = torch.argmax(outputs.logits).item()
+    predicted_label = model.config.id2label[predicted_label_idx]
+
+    return predicted_label
+
+# Define a function to analyze the sentiment of the text using VADER
+def analyze_sentiment(text):
+    analyzer = SentimentIntensityAnalyzer()
+    sentiment_scores = analyzer.polarity_scores(text)
+
+    # Determine sentiment label based on compound score
+    if sentiment_scores['compound'] >= 0.05:
+        sentiment_label = 'Positive'
+    elif sentiment_scores['compound'] <= -0.05:
+        sentiment_label = 'Negative'
+    else:
+        sentiment_label = 'Neutral'
+
+    return sentiment_label, sentiment_scores
+
+# Example text
+text = "I really enjoy watching this movie, it's so entertaining!"
+
+# Analyze adult content
+adult_content_label = analyze_adult_content(text)
+print("Adult Content Label:", adult_content_label)
 
 def text_analysis(text):
-    label_1 = get_hatespeech_score(text)
+    # Analyze sentiment
+    sentiment_label, sentiment_scores = analyze_sentiment(text)
+    print("Sentiment Label:", sentiment_label)
+    print("Sentiment Scores:", sentiment_scores)
+
     html = '''<!doctype html>
 <html>
 <body>
@@ -29,8 +83,7 @@ def text_analysis(text):
 </div>
 </body>
 </html>
-'''.format("Alpha", label_1, "Gamma", "Theta")
-
+'''.format(sentiment_label, sentiment_scores, "Gamma", "Theta")
     return html
 
 demo = gr.Interface(
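
Note on the added dependency lines: `!pip install ...` is IPython/notebook shell syntax, so those three statements raise a SyntaxError when app.py is executed as a plain Python script (which is how a Gradio Space runs it). A minimal sketch of the usual alternative, assuming the same dependency set as this diff, is to declare the packages in a requirements.txt next to app.py and keep only plain imports in code; the snippet below is illustrative and not part of this commit:

# requirements.txt (sketch, assumed file, not in this commit)
transformers
torch
vaderSentiment

# app.py would then start with ordinary imports only
import gradio as gr
import torch
from transformers import RobertaTokenizer, RobertaForSequenceClassification
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer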