"""Gradio demo exposing several Hugging Face NLP pipelines.

Models are loaded lazily — and cached — the first time a task runs, so
startup stays fast and only the models actually used get downloaded.
"""

from functools import lru_cache

import gradio as gr
from transformers import pipeline

# ---------------------------------------------------------------------------
# Lazy, cached model loaders.
# lru_cache ensures each pipeline is built at most once per process instead
# of being re-created on every button click (the original reloaded the model
# on each execution).
# ---------------------------------------------------------------------------


@lru_cache(maxsize=None)
def load_qa_model():
    """Extractive question answering (SQuAD-finetuned BERT)."""
    return pipeline(
        "question-answering",
        model="bert-large-uncased-whole-word-masking-finetuned-squad",
    )


@lru_cache(maxsize=None)
def load_classifier_model():
    """Zero-shot classification via NLI (DeBERTa-v3)."""
    return pipeline(
        "zero-shot-classification",
        model="MoritzLaurer/deberta-v3-base-zeroshot-v1.1-all-33",
    )


@lru_cache(maxsize=None)
def load_translator_model(target_language):
    """English -> ``target_language`` translation pipeline.

    BUGFIX: the original hard-coded the task to "translation_en_to_nl" for
    every language and passed the task string (not a model id) as ``model=``.
    Selecting the task by language lets transformers resolve its default
    model for that pair (NOTE(review): verify a default model exists for
    every language offered in the UI dropdown).
    """
    return pipeline(f"translation_en_to_{target_language}")


@lru_cache(maxsize=None)
def load_generator_model():
    """Open-ended text generation (GPT-Neo 2.7B)."""
    return pipeline(
        "text-generation",
        model="EleutherAI/gpt-neo-2.7B",
        tokenizer="EleutherAI/gpt-neo-2.7B",
    )


@lru_cache(maxsize=None)
def load_summarizer_model():
    """Abstractive summarization (BART large, CNN/DailyMail)."""
    return pipeline("summarization", model="facebook/bart-large-cnn")


# ---------------------------------------------------------------------------
# Task wrappers: each takes plain strings/lists and returns a plain string.
# ---------------------------------------------------------------------------


def process_qa(context, question):
    """Answer ``question`` from ``context``; returns the answer span."""
    return load_qa_model()(context=context, question=question)["answer"]


def process_classifier(text, labels):
    """Classify ``text`` against candidate ``labels``; returns the top label."""
    return load_classifier_model()(text, labels)["labels"][0]


def process_translation(text, target_language):
    """Translate English ``text`` into ``target_language`` (e.g. "nl")."""
    return load_translator_model(target_language)(text)[0]["translation_text"]


def process_generation(prompt):
    """Continue ``prompt`` with up to 50 tokens of generated text."""
    return load_generator_model()(prompt, max_length=50)[0]["generated_text"]


def process_summarization(text):
    """Summarize ``text`` deterministically (40–150 tokens)."""
    result = load_summarizer_model()(
        text, max_length=150, min_length=40, do_sample=False
    )
    return result[0]["summary_text"]


# ---------------------------------------------------------------------------
# Gradio interface.
#
# BUGFIXES vs. the original:
#   * Callbacks read component ``.value`` and called ``.update()``; in Gradio
#     Blocks a callback receives input values as arguments and must *return*
#     the output value, wired via ``click(fn, inputs=..., outputs=...)``.
#     The original ``click(fn)`` passed no inputs/outputs, so nothing worked.
#   * Dispatch used ``eval(f"process_{task.lower()}")``, which yields invalid
#     names like ``process_question answering`` (SyntaxError) and is an
#     eval-on-UI-input security smell; replaced with explicit branching.
#   * The build-time ``if task_select_single.value == ...`` component
#     creation ran once at page construction (value is always the default),
#     so the conditional fields could never appear; replaced by a single
#     generic "extra" field.
# ---------------------------------------------------------------------------

TASKS = [
    "Question Answering",
    "Zero-Shot Classification",
    "Translation",
    "Text Generation",
    "Summarization",
]


def execute_task_single(task, text, extra):
    """Run ``task`` on ``text``.

    ``extra`` is the task-specific second input: the question for QA,
    comma-separated labels for classification, or the target-language code
    (nl/fr/es/de) for translation; it is ignored by the other tasks.
    """
    if task == "Question Answering":
        return process_qa(text, extra)
    if task == "Zero-Shot Classification":
        labels = [label.strip() for label in extra.split(",") if label.strip()]
        return process_classifier(text, labels)
    if task == "Translation":
        # Default to Dutch when no target language is supplied.
        return process_translation(text, extra.strip() or "nl")
    if task == "Text Generation":
        return process_generation(text)
    if task == "Summarization":
        return process_summarization(text)
    return f"Unknown task: {task}"


def execute_task_multi(task, text):
    """Single-input variant; translation defaults to Dutch."""
    return execute_task_single(task, text, "")


with gr.Blocks() as demo:
    gr.Markdown("Choose an NLP task and input the required text.")

    with gr.Tab("Single Models"):
        gr.Markdown("This tab is for single models demonstration.")
        task_select_single = gr.Dropdown(TASKS, label="Select Task")
        input_text_single = gr.Textbox(label="Input")
        # One generic field replaces the original's build-time conditional
        # components: question (QA), comma-separated labels (classification),
        # or target language code nl/fr/es/de (translation).
        extra_input_single = gr.Textbox(
            label="Question / Labels / Target language (task-dependent)"
        )
        output_text_single = gr.Textbox(label="Output")
        execute_button_single = gr.Button("Execute")
        execute_button_single.click(
            execute_task_single,
            inputs=[task_select_single, input_text_single, extra_input_single],
            outputs=output_text_single,
        )

    with gr.Tab("Multi-model"):
        gr.Markdown("This tab is for multi-model demonstration.")
        task_select_multi = gr.Dropdown(TASKS, label="Select Task")
        input_text_multi = gr.Textbox(label="Input")
        output_text_multi = gr.Textbox(label="Output")
        execute_button_multi = gr.Button("Execute")
        execute_button_multi.click(
            execute_task_multi,
            inputs=[task_select_multi, input_text_multi],
            outputs=output_text_multi,
        )

demo.launch()