import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned T5 summarization model and its tokenizer.
tokenizer = AutoTokenizer.from_pretrained("Armandoliv/t5-small-summarizer-scitldr")
model = AutoModelForSeq2SeqLM.from_pretrained("Armandoliv/t5-small-summarizer-scitldr")

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = model.to(device)


def main_summarizer(text):
    max_input_length = 1024

    # Normalise the input: collapse newlines and replace curly apostrophes.
    preprocess_text = text.strip().replace("\n", " ").replace("’", "'").strip()

    # Tokenize, truncating to the maximum input length.
    tokenized_text = tokenizer.encode(
        preprocess_text,
        return_tensors="pt",
        truncation=True,
        max_length=max_input_length,
    ).to(device)

    # Generate the summary with beam search.
    summary_ids = model.generate(
        tokenized_text,
        max_length=256,
        num_beams=8,
        repetition_penalty=3.0,
        length_penalty=2.5,
        early_stopping=False,
    )

    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)


inputs = [gr.Textbox(lines=10, placeholder="Text Here...", label="Input")]
outputs = gr.Text(label="Summary")
title = "Text summarisation app"
description = "This demo uses AI models to summarize long text.\nIt focuses on scientific texts."

io = gr.Interface(
    fn=main_summarizer,
    inputs=inputs,
    outputs=outputs,
    title=title,
    description=description,
    css="""
        .gr-button-primary {
            background: #355764;
            background: -moz-linear-gradient(90deg, #355764 0%, #55a8a1 100%) !important;
            background: -webkit-linear-gradient(90deg, #355764 0%, #55a8a1 100%) !important;
            background: linear-gradient(90deg, #355764 0%, #55a8a1 100%) !important;
            color: white !important;
        }
    """,
)

io.launch()