"""Gradio web app that summarizes input text with a DistilBART model.

Loads a locally cached sshleifer/distilbart-cnn-12-6 snapshot (falling back
to the Hugging Face Hub copy if the local path is unusable) and serves a
simple text-in / summary-out interface.
"""

import torch  # required by transformers/accelerate backends
import gradio as gr
from accelerate import Accelerator
from transformers import pipeline

# Initialize the accelerator and report the detected device configuration.
accelerator = Accelerator()
print("Accelerator State:", accelerator.state)

# Path to a locally cached model snapshot; used to avoid re-downloading.
model_path = (
    "../Models/models--sshleifer--distilbart-cnn-12-6/snapshots"
    "/a4f8f3ea906ed274767e9906dbaede7531d660ff"
)

# Build the summarization pipeline on CPU (device=-1). If the local snapshot
# is missing or corrupt, fall back to pulling the same model from the Hub so
# the app still starts.
try:
    text_summary = pipeline(
        "summarization",
        model=model_path,
        device=-1,  # force CPU
    )
except Exception as e:
    print(f"Error initializing the summarization pipeline: {e}")
    print("Switching to the default model from Hugging Face.")
    text_summary = pipeline(
        "summarization",
        model="sshleifer/distilbart-cnn-12-6",
        device=-1,
    )


def summary(input_text):
    """Return a summary of *input_text*, or a human-readable error message.

    Parameters
    ----------
    input_text : str
        The text to summarize. Empty/whitespace-only input is rejected
        with a friendly message instead of being sent to the model.

    Returns
    -------
    str
        The generated summary, or an error description. Never raises:
        this function is a Gradio callback, so all failures are converted
        to display strings.
    """
    # Guard: the pipeline errors out on empty input; fail fast with a
    # clear message instead.
    if not input_text or not input_text.strip():
        return "Please enter some text to summarize."
    try:
        output = text_summary(input_text)
        return output[0]['summary_text']
    except Exception as e:
        return f"An error occurred while summarizing: {e}"


# Close any Gradio interfaces left over from a previous run (useful in
# notebook / hot-reload environments).
gr.close_all()

# Declare the web UI: one multi-line input box, one output box.
demo = gr.Interface(
    fn=summary,
    inputs=gr.Textbox(label="Input Text to Summarize", lines=6, placeholder="Enter text here..."),
    outputs=gr.Textbox(label="Summarized Text", lines=4),
    title="GenAIProject01: Text Summarizer",
    description="THIS APPLICATION WILL BE USED TO SUMMARIZE INPUT TEXT USING A PRE-TRAINED MODEL."
)

# Launch only when run as a script, so importing this module for its
# `summary` function does not start (and publicly share) a web server.
if __name__ == "__main__":
    demo.launch(share=True)