"""Gradio demo app: summarize user-supplied text with a DistilBART model."""

from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM
import torch
import gradio as gr

# Build the summarization pipeline once, directly in bfloat16.
# Passing torch_dtype here avoids loading the model twice (the original
# code loaded it inside pipeline() and then again via from_pretrained()
# just to change the dtype) and keeps the tokenizer and model paired.
text_summary = pipeline(
    "summarization",
    model="sshleifer/distilbart-cnn-12-6",
    torch_dtype=torch.bfloat16,
)


def summary(text):
    """Return the summary string produced by the pipeline for *text*.

    Parameter renamed from ``input`` to avoid shadowing the builtin;
    Gradio invokes this positionally, so callers are unaffected.
    """
    output = text_summary(text)
    # The pipeline returns a list of dicts; the summary lives under
    # the 'summary_text' key of the first (only) entry.
    return output[0]["summary_text"]


# Close any previously-running Gradio servers before launching a new one.
gr.close_all()

demo = gr.Interface(
    fn=summary,
    inputs=[gr.Textbox(label="Input text to summarize", lines=6)],
    outputs=[gr.Textbox(label="Summarized text", lines=4)],
    title="Abhinav 1: Text Summarizer",
    description="THIS APPLICATION WILL BE USED TO SUMMARIZE THE TEXT",
)
# share=True exposes a public Gradio link in addition to the local server.
demo.launch(share=True)