# HY-MT-Demo / app.py
# Author: ailan-ai — "Update app.py" (commit ecda03a, verified)
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
# Select the device: use the GPU when available, otherwise fall back to CPU.
# On a free CPU-only server this resolves to 'cpu'.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using device: {device}")
# --- Model loading ---
# Load only the Chimera ensemble model and its tokenizer.
# NOTE(review): trust_remote_code=True executes code from the model repo;
# acceptable here only because the source is the official tencent repo.
print("Loading Hunyuan-MT-Chimera-7B model and tokenizer...")
tokenizer = AutoTokenizer.from_pretrained("tencent/Hunyuan-MT-Chimera-7B", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("tencent/Hunyuan-MT-Chimera-7B", trust_remote_code=True).to(device)
print("Model loaded successfully.")
# --- End of model loading ---
def translate(text_to_translate, source_lang, target_lang):
    """Translate text with the already-loaded Hunyuan-MT-Chimera-7B model.

    Args:
        text_to_translate: Source text to translate.
        source_lang: Human-readable source language name (e.g. "Chinese").
        target_lang: Human-readable target language name (e.g. "English").

    Returns:
        The translated text as a stripped string; empty string for
        empty/whitespace-only input.
    """
    # Guard: nothing to translate — avoid a pointless (and slow) generate call.
    if not text_to_translate or not text_to_translate.strip():
        return ""
    prompt = f"Translate the following text from {source_lang} to {target_lang}:\n{text_to_translate}"
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    # Generate the translation (greedy decoding, capped at 256 new tokens).
    output = model.generate(**inputs, max_new_tokens=256)
    # Decode ONLY the newly generated tokens by slicing past the prompt's
    # token length. The previous approach — decoding the full sequence and
    # doing response.replace(prompt, "") — silently fails whenever the
    # tokenizer round-trip alters whitespace or characters in the prompt,
    # leaving the prompt prepended to the returned translation.
    prompt_token_count = inputs["input_ids"].shape[1]
    translated_text = tokenizer.decode(
        output[0][prompt_token_count:], skip_special_tokens=True
    )
    return translated_text.strip()
# --- Build the Gradio interface ---
# Layout order matters: widgets appear in the UI in the order they are created
# inside the Blocks/Row contexts.
with gr.Blocks() as demo:
    gr.Markdown("# 腾讯混元翻译模型体验Demo")
    gr.Markdown("模型: Hunyuan-MT-Chimera-7B (集成优化版)")
    with gr.Row():
        # Source/target languages are free-text fields passed verbatim into
        # the translation prompt; defaults translate Chinese -> English.
        source_language = gr.Textbox(label="源语言", value="Chinese")
        target_language = gr.Textbox(label="目标语言", value="English")
    input_text = gr.Textbox(label="输入文本", lines=5, placeholder="在这里输入需要翻译的文本...")
    output_text = gr.Textbox(label="翻译结果", lines=5)
    translate_button = gr.Button("开始翻译")
    # Wire the button to the translate() function defined above.
    translate_button.click(
        fn=translate,
        inputs=[input_text, source_language, target_language],
        outputs=output_text
    )
# Launch the app (blocks and serves the UI).
demo.launch()