KwabenaMufasa committed
Commit caa4739 • 1 Parent(s): 67f5432

Rename gradio_app.py to app.py

Files changed (1)
  1. gradio_app.py → app.py +23 -27

gradio_app.py → app.py RENAMED
@@ -1,9 +1,11 @@
-from transformers import AutoModelForSequenceClassification
-from transformers import TFAutoModelForSequenceClassification
-from transformers import AutoTokenizer, AutoConfig
+# Import the required Libraries
+import gradio as gr
 import numpy as np
+import pandas as pd
+import pickle
+import transformers
+from transformers import AutoTokenizer, AutoConfig,AutoModelForSequenceClassification,TFAutoModelForSequenceClassification, pipeline
 from scipy.special import softmax
-import gradio as gr
 
 # Requirements
 model_path = "KwabenaMufasa/Finetuned-Distilbert-base-model"
@@ -11,45 +13,39 @@ tokenizer = AutoTokenizer.from_pretrained(model_path)
 config = AutoConfig.from_pretrained(model_path)
 model = AutoModelForSequenceClassification.from_pretrained(model_path)
 
-
-# #Preprocess text (username and link placeholders)
+#Preprocess text
 def preprocess(text):
     new_text = []
     for t in text.split(" "):
-        t = '@user' if t.startswith('@') and len(t) > 1 else t
-        t = 'http' if t.startswith('http') else t
+        t = "@user" if t.startswith("@") and len(t) > 1 else t
+        t = "http" if t.startswith("http") else t
         new_text.append(t)
     return " ".join(new_text)
 
-
+#Process the input and return prediction
 def sentiment_analysis(text):
     text = preprocess(text)
 
-    # PyTorch-based models
-    encoded_input = tokenizer(text, return_tensors='pt')
+    encoded_input = tokenizer(text, return_tensors = "pt") # for PyTorch-based models
     output = model(**encoded_input)
     scores_ = output[0][0].detach().numpy()
     scores_ = softmax(scores_)
 
     # Format output dict of scores
-    labels = ['Negative', 'Neutral', 'Positive']
+    labels = ["Negative", "Neutral", "Positive"]
     scores = {l:float(s) for (l,s) in zip(labels, scores_) }
 
     return scores
-
-
-demo = gr.Interface(
-    fn=sentiment_analysis,
-    inputs=gr.Textbox(placeholder="Write your tweet here..."),
-    outputs="label",
-    interpretation="default",
-    examples=[["This is Spectacular!"]])
-
 
 
-
-demo.launch(server_name = "0.0.0.0.", server_port = 7860)
-
-if __name__=="__gradio_app__":
-    run()
-
+#Gradio app interface
+app = gr.Interface(fn = sentiment_analysis,
+                   inputs = gr.Textbox("Write your text or tweet here"),
+                   outputs = "label",
+                   title = "Twitter Sentiment Analyzer App",
+                   description = "Vaccinate or Do Not Vaccinate",
+                   interpretation = "default",
+                   examples = [["Being vaccinated is actually awesome :)"]]
+                   )
+
+app.launch()
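
Below is a minimal sketch (not part of the commit) of how the sentiment logic in the new app.py could be exercised from a plain Python session, without the Gradio UI. It assumes the model repo KwabenaMufasa/Finetuned-Distilbert-base-model is downloadable and that transformers, torch and scipy are installed; the file name sanity_check.py and the sample tweet are invented for illustration.

# sanity_check.py -- illustrative sketch, not part of this commit.
# Assumes transformers, torch and scipy are installed and the model repo is reachable.
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from scipy.special import softmax

model_path = "KwabenaMufasa/Finetuned-Distilbert-base-model"
tokenizer = AutoTokenizer.from_pretrained(model_path)
model = AutoModelForSequenceClassification.from_pretrained(model_path)

def preprocess(text):
    # Same placeholder logic as app.py: mask user handles and links.
    tokens = []
    for t in text.split(" "):
        t = "@user" if t.startswith("@") and len(t) > 1 else t
        t = "http" if t.startswith("http") else t
        tokens.append(t)
    return " ".join(tokens)

def sentiment_analysis(text):
    # Tokenize, run the PyTorch model, and turn logits into probabilities.
    encoded_input = tokenizer(preprocess(text), return_tensors="pt")
    scores_ = softmax(model(**encoded_input)[0][0].detach().numpy())
    return {label: float(s) for label, s in zip(["Negative", "Neutral", "Positive"], scores_)}

if __name__ == "__main__":
    # Sample tweet is made up for illustration.
    print(sentiment_analysis("@someuser Being vaccinated is actually awesome :) http://example.com"))

The dict of label-to-probability returned by sentiment_analysis is the shape the Gradio "label" output component expects, which is why the function can be wired straight into gr.Interface. The rename itself matters because Gradio Spaces on Hugging Face use app.py as the default entry point unless app_file is overridden in the Space's README metadata.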