"""Gradio demo: grammar correction with vennify/t5-base-grammar-correction."""

import torch
import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration

# Run on GPU when available; the model is small enough for CPU inference too.
device = "cuda" if torch.cuda.is_available() else "cpu"

MODEL_NAME = "vennify/t5-base-grammar-correction"
model = T5ForConditionalGeneration.from_pretrained(MODEL_NAME)
tokenizer = T5Tokenizer.from_pretrained(MODEL_NAME)
model.to(device)
model.eval()


def generate_text(text: str) -> str:
    """Return a grammar-corrected version of *text*.

    The model was fine-tuned with a ``grammar:`` task prefix, so it is
    prepended before tokenization.
    """
    prompt = f"grammar: {text}"
    # Truncate so inputs never exceed the model's 512-token context window.
    input_ids = tokenizer(
        prompt,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    ).input_ids.to(device)

    with torch.no_grad():
        # early_stopping only takes effect with beam search, hence num_beams.
        outputs = model.generate(
            input_ids,
            max_length=512,
            num_beams=5,
            early_stopping=True,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


with gr.Blocks() as deeplearning:
    with gr.Row():
        with gr.Column():
            text = gr.TextArea(placeholder="Enter your text here...")
            # Button caption is its value; `label=` is not the button text.
            button = gr.Button("Correct")
        # gr.outputs.* was removed in Gradio 3+; use gr.Textbox directly.
        output = gr.Textbox(label="Corrected Text")
    button.click(generate_text, inputs=text, outputs=output)


if __name__ == "__main__":
    # Guarded so importing this module does not start the web server.
    deeplearning.launch()