"""Gradio demo: correct Traditional-Chinese text with a fine-tuned T5 model."""

import os

import gradio as gr
import torch
from transformers import T5ForConditionalGeneration, T5Tokenizer

# Local model path or Hugging Face Model Hub model ID.
model_name_or_path = "DeepLearning101/Corrector101zhTWT5"
# BUG FIX: the original read HF_HOME (the Hub *cache directory* variable) and
# passed it as an auth token. HF_TOKEN is the conventional variable for
# access tokens; passing a cache path as a token would fail authentication.
auth_token = os.getenv("HF_TOKEN")

# Try to load the tokenizer and model; abort with a clear message on failure.
try:
    # NOTE: use_auth_token= is deprecated in newer transformers in favour of
    # token=; kept here for compatibility with the version this file targets.
    tokenizer = T5Tokenizer.from_pretrained(model_name_or_path, use_auth_token=auth_token)
    model = T5ForConditionalGeneration.from_pretrained(model_name_or_path, use_auth_token=auth_token)
    model.eval()
except Exception as e:
    print(f"加載模型或分詞器失敗,錯誤信息:{e}")
    # raise SystemExit instead of the interactive-only exit() builtin.
    raise SystemExit(1)

if torch.cuda.is_available():
    model.cuda()  # Move the model to GPU when one is available.


def correct_text(text):
    """Run *text* through the T5 model and return the corrected string.

    Args:
        text: Input Chinese text (truncated to 512 tokens).

    Returns:
        The model's corrected text with special tokens stripped.
    """
    inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True, padding=True)
    if torch.cuda.is_available():
        inputs = {k: v.cuda() for k, v in inputs.items()}  # Move inputs to GPU.
    with torch.no_grad():
        # BUG FIX: without max_length the default generation cap (20 tokens in
        # the targeted transformers versions) truncates corrections of longer
        # inputs; match the 512-token input limit.
        outputs = model.generate(**inputs, max_length=512)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


def main():
    """Build and launch the Gradio interface."""
    interface = gr.Interface(
        fn=correct_text,
        inputs=gr.Textbox(lines=5, placeholder="請輸入需要修正的中文文本..."),
        outputs=gr.Textbox(label="修正後的文本"),
        # NOTE(review): the original file was truncated mid-string at title=";
        # wording reconstructed — confirm against the intended text.
        title="中文文本修正",
    )
    interface.launch()


if __name__ == "__main__":
    main()