import gradio as gr
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch

# Load the model from Hugging Face
model_name = "mimoha/t5-title-generator"
tokenizer = T5Tokenizer.from_pretrained(model_name)
model = T5ForConditionalGeneration.from_pretrained(model_name)

# Use a GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

def generate_title(abstract):
    # Prefix the abstract with the T5 task prompt and tokenize it
    inputs = tokenizer(
        "summarize: " + abstract,
        return_tensors="pt",
        max_length=256,
        truncation=True
    ).to(device)
    # Beam search over 5 beams, keeping the 3 best candidate titles
    outputs = model.generate(
        **inputs,
        max_new_tokens=32,
        num_beams=5,
        num_return_sequences=3,
        early_stopping=True,
        no_repeat_ngram_size=2
    )
    titles = [tokenizer.decode(out, skip_special_tokens=True) for out in outputs]
    return "\n".join([f"{i+1}. {title}" for i, title in enumerate(titles)])

iface = gr.Interface(
    fn=generate_title,
    inputs=gr.Textbox(label="Research abstract"),
    outputs=gr.Textbox(label="Generated titles"),
    title="Research Title Generator",
    description="Enter a research abstract and we will generate 3 possible titles for you using T5."
)

iface.launch(share=True, show_error=True)
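
For reference, the same function can be exercised without the Gradio UI, for example from a Python shell in an environment where the model and generate_title defined above have already been loaded. The sample abstract below is only a placeholder for illustration, not text from the model's training data:

# Minimal usage sketch (placeholder abstract, illustrative only)
sample_abstract = (
    "We fine-tune a T5 model to generate paper titles from research abstracts "
    "and compare the generated titles against the original ones."
)
print(generate_title(sample_abstract))
# The output is three numbered candidate titles, one per line.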