Spaces: Runtime error
Update app.py
Browse files

app.py CHANGED
@@ -1,9 +1,12 @@
 import gradio as gr
-from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
 import json
 import torch
+from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
+from transformers import AutoModelForTokenClassification
+from transformers import DistilBertTokenizer, DistilBertForSequenceClassification
 
-
+
+# Define hyperparameters for QnA
 learning_rate = 3e-5
 batch_size = 16
 epochs = 3
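Note on the new import block: `pipeline` is imported alongside the explicit `AutoModelForQuestionAnswering`/`AutoTokenizer` classes, but the hunks below wire the QA path by hand. For reference only (not part of this commit), a minimal sketch of the pipeline-based equivalent with the same checkpoint; the question and context strings are made up for illustration.

```python
from transformers import pipeline

# One-liner QA with the same checkpoint the Space loads manually below.
qa = pipeline("question-answering", model="deepset/roberta-base-squad2")

result = qa(
    question="What model does the Space use?",  # illustrative inputs
    context="The Space answers questions with deepset/roberta-base-squad2.",
)
print(result["answer"], result["score"])
```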
@@ -12,12 +15,15 @@ warmup_steps = 100
 weight_decay = 0.01
 dropout_prob = 0.1
 gradient_clip_value = 1.0
-
-context_val = ''
-
 q_n_a_model_name = "deepset/roberta-base-squad2"
 q_n_a_model = AutoModelForQuestionAnswering.from_pretrained(q_n_a_model_name)
-
+q_n_a_tokenizer = AutoTokenizer.from_pretrained(q_n_a_model_name)
+
+
+classification_tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
+classification_model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")
+
+
 
 context = gr.Textbox(label="Add the Context (Paragraph or texts) for which you want to get insights", lines=10, outputs="text")
 
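The classifier loaded here is the un-fine-tuned `distilbert-base-uncased` checkpoint, which ships no sequence-classification head; transformers re-initializes the head weights randomly (and warns about it), so its output probabilities are essentially untrained. A short check, using only what the diff loads:

```python
from transformers import DistilBertForSequenceClassification

# Base checkpoint: the classification head is newly initialized, not fine-tuned.
model = DistilBertForSequenceClassification.from_pretrained("distilbert-base-uncased")

# With no labels configured, the head defaults to two generic classes.
print(model.config.num_labels)   # typically 2
print(model.config.id2label)     # typically {0: 'LABEL_0', 1: 'LABEL_1'}
```

A sentiment-tuned checkpoint such as `distilbert-base-uncased-finetuned-sst-2-english` would give meaningful POSITIVE/NEGATIVE scores; using it is an assumption about intent, not something this commit does.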
@@ -29,7 +35,7 @@ def q_n_a_fn(context, text):
     q_n_a_model.to(device)
 
     # Convert inputs to tensors
-    inputs =
+    inputs = q_n_a_tokenizer(QA_input["context"], QA_input["question"], return_tensors="pt", max_length=max_seq_length, truncation=True, padding="max_length").to(device)
 
     # Get predictions
     with torch.no_grad():
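The new `inputs = q_n_a_tokenizer(...)` line relies on `QA_input`, `max_seq_length`, and `device` defined elsewhere in app.py (not shown in this diff), and it passes the context before the question. Extractive-QA tokenizers are conventionally called with the question first and truncation applied only to the context. Below is a self-contained sketch of the whole QA step under those conventions; the 384 default length is an assumed value, not taken from the diff.

```python
import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer

model_name = "deepset/roberta-base-squad2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForQuestionAnswering.from_pretrained(model_name)

def answer_question(question: str, context: str, max_seq_length: int = 384) -> str:
    # Question first, context second; truncate only the context so the
    # question always survives long inputs.
    inputs = tokenizer(
        question,
        context,
        return_tensors="pt",
        max_length=max_seq_length,
        truncation="only_second",
        padding="max_length",
    )
    with torch.no_grad():
        outputs = model(**inputs)
    # Pick the most likely start/end token positions, as the following hunk does.
    start_idx = int(outputs.start_logits.argmax())
    end_idx = int(outputs.end_logits.argmax())
    return tokenizer.decode(
        inputs["input_ids"][0][start_idx : end_idx + 1], skip_special_tokens=True
    )
```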
@@ -46,14 +52,21 @@ def q_n_a_fn(context, text):
     answer_tokens = inputs["input_ids"][0][start_idx : end_idx + 1]
 
     # Decode the answer tokens into a human-readable answer
-    answer =
+    answer = q_n_a_tokenizer.decode(inputs["input_ids"][0][start_idx:end_idx+1], skip_special_tokens=True)
 
     return answer
 
-def classification_fn(
-
+def classification_fn(context):
+    inputs = classification_tokenizer(context, return_tensors="pt")
+    with torch.no_grad():
+        logits = classification_model(**inputs).logits
+    class_probabilities = torch.softmax(logits, dim=1)
+
+    class_probabilities = torch.softmax(logits, dim=1)
+    class_probabilities = class_probabilities[0].tolist() # Convert to a Python list
+    return {"POSITIVE": class_probabilities[0], "NEGATIVE": class_probabilities[1]}
 
-def translate_fn(text):
+def translate_fn(context, text):
     return context
 
 with gr.Blocks(theme='gradio/soft') as demo:
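In the added `classification_fn`, `torch.softmax(logits, dim=1)` is computed twice (the second assignment simply repeats the first), and index 0 is hard-coded as POSITIVE even though label order is defined by the checkpoint's `id2label` mapping. A sketch without the duplicate that reads the label names from the config; the fine-tuned checkpoint is an assumption, since the commit itself loads plain `distilbert-base-uncased`:

```python
import torch
from transformers import DistilBertTokenizer, DistilBertForSequenceClassification

# Assumed fine-tuned checkpoint; the commit loads the un-fine-tuned base model.
checkpoint = "distilbert-base-uncased-finetuned-sst-2-english"
tokenizer = DistilBertTokenizer.from_pretrained(checkpoint)
model = DistilBertForSequenceClassification.from_pretrained(checkpoint)

def classification_fn(context: str) -> dict:
    inputs = tokenizer(context, return_tensors="pt", truncation=True)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits, dim=1)[0].tolist()   # a single softmax is enough
    # Map each probability through the checkpoint's own label names
    # instead of hard-coding POSITIVE at index 0.
    return {model.config.id2label[i]: p for i, p in enumerate(probs)}
```

The returned label-to-probability dict is exactly the format `gr.Label` expects in the Classifier tab below.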
@@ -65,11 +78,11 @@ with gr.Blocks(theme='gradio/soft') as demo:
 
     with gr.Tab("Classifier"):
         with gr.Row():
-            gr.Interface(fn=classification_fn, inputs=[context], outputs=
+            gr.Interface(fn=classification_fn, inputs=[context], outputs=[gr.Label()])
 
     with gr.Tab("Translation"):
         with gr.Row():
-            gr.Interface(fn=translate_fn, inputs=[gr.Radio(["French", "Hindi", "Spanish"], label="Languages", info="Select language")], outputs="text")
+            gr.Interface(fn=translate_fn, inputs=[context, gr.Radio(["French", "Hindi", "Spanish"], label="Languages", info="Select language")], outputs="text")
 
     with gr.Tab("Summarization"):
         with gr.Row():
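The tabs mount full `gr.Interface` objects inside the `gr.Blocks` layout and reuse the shared `context` textbox across them (the textbox also receives an `outputs="text"` argument, which is not a Textbox parameter). A more conventional Blocks wiring gives each tab its own output component and an explicit button event; the handler bodies below are placeholders, not the app's real logic.

```python
import gradio as gr

def classification_fn(context):
    # Placeholder body for the sketch; the app calls its DistilBERT classifier here.
    return {"POSITIVE": 0.5, "NEGATIVE": 0.5}

def translate_fn(context, language):
    # Placeholder: the committed translate_fn currently just echoes the context back.
    return context

with gr.Blocks(theme="gradio/soft") as demo:
    context = gr.Textbox(
        label="Add the Context (Paragraph or texts) for which you want to get insights",
        lines=10,
    )

    with gr.Tab("Classifier"):
        classify_btn = gr.Button("Classify")
        label_out = gr.Label()
        classify_btn.click(classification_fn, inputs=[context], outputs=[label_out])

    with gr.Tab("Translation"):
        language = gr.Radio(["French", "Hindi", "Spanish"], label="Languages",
                            info="Select language")
        translate_btn = gr.Button("Translate")
        translated = gr.Textbox(label="Output")
        translate_btn.click(translate_fn, inputs=[context, language], outputs=[translated])

demo.launch()
```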