"""Gradio app that summarizes user-provided text with a DistilBART model."""

import os

import torch
import gradio as gr
from transformers import pipeline

# NOTE: to cache models in a custom directory, set HF_HOME before importing
# transformers, e.g. os.environ["HF_HOME"] = "projects/Models".
# (TRANSFORMERS_CACHE is deprecated in favor of HF_HOME.)

# Summarization pipeline; bfloat16 halves memory vs. float32 with minimal
# quality loss on supported hardware.
summarize_text = pipeline(
    "summarization",
    model="sshleifer/distilbart-cnn-12-6",
    torch_dtype=torch.bfloat16,
)


def summary(text):
    """Return a summary of *text* produced by the DistilBART pipeline.

    Args:
        text: The input passage to summarize.

    Returns:
        The generated summary string (first candidate from the pipeline).
    """
    # Pipeline returns a list of dicts; the summary is under 'summary_text'.
    output = summarize_text(text)
    return output[0]['summary_text']


def main():
    """Close stale Gradio servers and launch the summarizer UI."""
    gr.close_all()
    demo = gr.Interface(
        fn=summary,
        inputs=[gr.Textbox(label="Text to summarize", lines=6)],
        outputs=[gr.Textbox(label="Summarized Text", lines=5)],
        title="Text Summarizer",
        description="This app is used to summarize text",
    )
    # share=True creates a temporary public link via Gradio's tunnel service.
    demo.launch(share=True)


if __name__ == "__main__":
    main()