Spaces:
Sleeping
Sleeping
Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from functools import lru_cache

import gradio as gr
from transformers import pipeline
|
3 |
+
|
4 |
+
# Lazy loading: Define functions to load models only when needed
|
5 |
+
@lru_cache(maxsize=None)
def load_qa_model():
    """Build and return the extractive question-answering pipeline.

    Cached so repeated calls reuse the already-loaded BERT-large model;
    the original rebuilt (and potentially re-downloaded) the pipeline on
    every request, defeating the stated "lazy loading" intent.
    """
    return pipeline("question-answering", model="bert-large-uncased-whole-word-masking-finetuned-squad")
|
7 |
+
|
8 |
+
@lru_cache(maxsize=None)
def load_classifier_model():
    """Build and return the zero-shot classification pipeline.

    Cached so the DeBERTa model is loaded once per process instead of on
    every classification request.
    """
    return pipeline("zero-shot-classification", model="MoritzLaurer/deberta-v3-base-zeroshot-v1.1-all-33")
|
10 |
+
|
11 |
+
@lru_cache(maxsize=None)
def load_translator_model(target_language):
    """Build and return an English -> *target_language* translation pipeline.

    Bug fix: the original passed the *task* string
    ``f"translation_en_to_{target_language}"`` as the ``model=`` argument
    (not a valid model id) and hard-coded the task to ``translation_en_to_nl``,
    so no target language could actually work. Use the matching
    Helsinki-NLP OPUS-MT checkpoint for each language instead.
    Cached per language so each model is loaded at most once.

    Args:
        target_language: ISO 639-1 code the UI offers ("nl", "fr", "es", "de").
    """
    task = f"translation_en_to_{target_language}"
    return pipeline(task, model=f"Helsinki-NLP/opus-mt-en-{target_language}")
|
14 |
+
|
15 |
+
@lru_cache(maxsize=None)
def load_generator_model():
    """Build and return the GPT-Neo text-generation pipeline.

    Cached: re-instantiating a 2.7B-parameter model on every request is
    prohibitively slow and memory-hungry.
    """
    return pipeline("text-generation", model="EleutherAI/gpt-neo-2.7B", tokenizer="EleutherAI/gpt-neo-2.7B")
|
17 |
+
|
18 |
+
@lru_cache(maxsize=None)
def load_summarizer_model():
    """Build and return the BART summarization pipeline (loaded once)."""
    return pipeline("summarization", model="facebook/bart-large-cnn")
|
20 |
+
|
21 |
+
# Define functions to process inputs
|
22 |
+
def process_qa(context, question):
    """Extract the answer to *question* from *context* with the QA pipeline."""
    result = load_qa_model()(context=context, question=question)
    return result["answer"]
|
25 |
+
|
26 |
+
def process_classifier(text, labels):
    """Return the highest-scoring candidate label for *text*."""
    prediction = load_classifier_model()(text, labels)
    return prediction["labels"][0]
|
29 |
+
|
30 |
+
def process_translation(text, target_language):
    """Translate English *text* into *target_language* and return the string."""
    translator = load_translator_model(target_language)
    return translator(text)[0]["translation_text"]
|
34 |
+
|
35 |
+
def process_generation(prompt):
    """Continue *prompt* with generated text (max_length=50 tokens)."""
    outputs = load_generator_model()(prompt, max_length=50)
    return outputs[0]["generated_text"]
|
38 |
+
|
39 |
+
def process_summarization(text):
    """Summarize *text* deterministically (between 40 and 150 tokens)."""
    outputs = load_summarizer_model()(text, max_length=150, min_length=40, do_sample=False)
    return outputs[0]["summary_text"]
|
42 |
+
|
43 |
+
# Gradio Interface
|
44 |
+
# Gradio Interface
TASK_CHOICES = ["Question Answering", "Zero-Shot Classification", "Translation", "Text Generation", "Summarization"]


def _dispatch_task(task, text, question, labels, target_language):
    """Route a UI request to the processing function for *task*.

    Replaces two broken mechanisms in the original:
    - build-time ``component.value`` reads, which only ever see the
      construction-time default, never the user's actual input;
    - ``eval(f"process_{task.lower()}")`` — task names contain spaces
      ("question answering"), so the eval'd name was never a valid
      identifier, and eval-based dispatch is unsafe besides.

    Args:
        task: one of TASK_CHOICES.
        text: main input text (context/source/prompt depending on task).
        question: question string, used only by Question Answering.
        labels: candidate labels, used only by Zero-Shot Classification.
        target_language: language code, used only by Translation.
    """
    if task == "Question Answering":
        return process_qa(text, question)
    if task == "Zero-Shot Classification":
        return process_classifier(text, list(labels))
    if task == "Translation":
        return process_translation(text, target_language)
    if task == "Text Generation":
        return process_generation(text)
    if task == "Summarization":
        return process_summarization(text)
    return ""


with gr.Blocks() as demo:
    gr.Markdown("Choose an NLP task and input the required text.")

    with gr.Tab("Single Models"):
        gr.Markdown("This tab is for single models demonstration.")
        # Single models interface: all task-specific inputs are always
        # present; the handler only reads the ones relevant to the
        # selected task (the original tried to add fields conditionally
        # at build time, which cannot react to dropdown changes).
        task_select_single = gr.Dropdown(TASK_CHOICES, label="Select Task")
        input_text_single = gr.Textbox(label="Input")
        question_single = gr.Textbox(label="Question")  # Question Answering only
        labels_single = gr.CheckboxGroup(["Label 1", "Label 2", "Label 3"], label="Labels")
        language_single = gr.Dropdown(["nl", "fr", "es", "de"], value="nl", label="Target Language")
        output_text_single = gr.Textbox(label="Output")

        execute_button_single = gr.Button("Execute")

        # Wire current widget values in and the result out. The original
        # called click(fn) with no inputs/outputs and relied on
        # component.update(...) side effects, so the UI never showed a result.
        execute_button_single.click(
            _dispatch_task,
            inputs=[task_select_single, input_text_single, question_single,
                    labels_single, language_single],
            outputs=output_text_single,
        )

    with gr.Tab("Multi-model"):
        gr.Markdown("This tab is for multi-model demonstration.")
        # Multi-model interface
        task_select_multi = gr.Dropdown(TASK_CHOICES, label="Select Task")
        input_text_multi = gr.Textbox(label="Input")
        output_text_multi = gr.Textbox(label="Output")

        execute_button_multi = gr.Button("Execute")

        def _dispatch_task_multi(task, text):
            # Single free-text input: Translation defaults to Dutch as
            # before; Question Answering reuses the text as its own
            # question for lack of a separate field in this tab.
            return _dispatch_task(task, text, text, [], "nl")

        execute_button_multi.click(
            _dispatch_task_multi,
            inputs=[task_select_multi, input_text_multi],
            outputs=output_text_multi,
        )

demo.launch()
|