# app.py
"""Gradio app: Japanese text summarization with a T5 model.

Loads the ``sonoisa/t5-base-japanese`` checkpoint once at startup and
exposes a single text-in / text-out Gradio interface.
"""
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the tokenizer and model once at import time (not per request).
tokenizer = AutoTokenizer.from_pretrained("sonoisa/t5-base-japanese")
model = AutoModelForSeq2SeqLM.from_pretrained("sonoisa/t5-base-japanese")
model.eval()  # inference only; disables dropout etc.


def summarize(text: str) -> str:
    """Return a beam-search summary of *text*.

    Args:
        text: Japanese source text to summarize. Input longer than 512
            tokens is truncated by the tokenizer.

    Returns:
        The decoded summary string, or "" for empty/whitespace input
        (avoids forcing the model to emit >= 30 tokens about nothing).
    """
    if not text or not text.strip():
        return ""
    # "要約: " is the task prefix this T5 checkpoint expects — it is a
    # runtime string the model was trained with; do not translate it.
    inputs = tokenizer(
        "要約: " + text,
        return_tensors="pt",
        max_length=512,
        truncation=True,
    )
    # Pass attention_mask explicitly so any padding tokens are ignored
    # during generation (omitting it makes the model attend to pads).
    summary_ids = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=150,
        min_length=30,
        length_penalty=2.0,
        num_beams=4,
        early_stopping=True,
    )
    return tokenizer.decode(summary_ids[0], skip_special_tokens=True)


# Module-level launch kept intentionally: this file is run as a script
# (e.g. on Hugging Face Spaces), which expects the app to start on import.
iface = gr.Interface(fn=summarize, inputs="text", outputs="text")
iface.launch()