Update app.py
app.py CHANGED
@@ -1,11 +1,30 @@
 import gradio as gr
-from transformers import pipeline
 from gradio.components import Textbox
 
-
-
-
+import torch
+from transformers import pipeline
+from transformers import AutoTokenizer
+from transformers import AutoModelForSequenceClassification
+
+# Load the DistilBERT tokenizer
+tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
+
+#Load the model
+model = AutoModelForSequenceClassification.from_pretrained("skylord/pharma_classification")
+
 
+def is_pharma(sentence, tokenize=tokenizer, model=model):
+    # tokenize the input
+    inputs = tokenizer(sentence, return_tensors='pt')
+    # ensure model and inputs are on the same device (GPU)
+    inputs = {name: tensor.cuda() for name, tensor in inputs.items()}
+    model = model.cuda()
+    # get prediction - 2 classes "probabilities" (not really true because they still need to be normalized)
+    with torch.no_grad():
+        predictions = model(**inputs)[0].cpu().numpy()
+    # get the top prediction class and convert it to its associated label
+    top_prediction = predictions.argmax().item()
+    return ds['train'].features['labels'].int2str(top_prediction)
 
 def predict_sentiment(text):
     """
@@ -13,10 +32,8 @@ def predict_sentiment(text):
     :param text: str, input text to analyze.
     :return: str, predicted sentiment and confidence score.
     """
-    result =
-
-    score = result['score']
-    return f"TAG: {label}, Confidence: {score:.2f}"
+    result = is_pharma(text)
+    return f"TAG: {result}" #, Confidence: {score:.2f}
 
 input1 = Textbox(lines=2, placeholder="Type your text here...")
 
@@ -24,8 +41,8 @@ input1 = Textbox(lines=2, placeholder="Type your text here...")
 iface = gr.Interface(fn=predict_sentiment,
                      inputs=input1,
                      outputs="text",
-                     title="
-                     description="This model predicts the
+                     title="Identify if the news item is relevant to the pharma industry",
+                     description="This model predicts the tag of the input text. Enter a sentence to see if it's pharma or not. Response is a Yes or a No")
 
 # Launch the interface
 iface.launch()
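A note for anyone adapting this commit: the added is_pharma helper returns ds['train'].features['labels'].int2str(...), but ds is never defined in app.py (it looks like a datasets object left over from training), the tokenize parameter is accepted but never used, and the unconditional .cuda() calls will fail on a CPU-only Space. The following is a minimal, self-contained sketch, under the assumption that the model's config.id2label mapping carries the same label names the training dataset provided; that assumption is not confirmed by the commit.

import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased")
model = AutoModelForSequenceClassification.from_pretrained("skylord/pharma_classification")

# Use a GPU only if one is actually available; Spaces often run on CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = model.to(device)
model.eval()

def is_pharma(sentence):
    # Tokenize and move the tensors to the same device as the model.
    inputs = tokenizer(sentence, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = model(**inputs).logits
    # Index of the highest-scoring class for the single input sentence.
    top_prediction = logits.argmax(dim=-1).item()
    # Assumption: config.id2label holds the human-readable label names
    # (the committed code reads them from the undefined `ds` object instead).
    return model.config.id2label[top_prediction]

The otherwise unused `from transformers import pipeline` import also suggests a simpler route: pipeline("text-classification", model="skylord/pharma_classification") returns both a label and a score, which would let the commented-out Confidence: {score:.2f} formatting in predict_sentiment be restored.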