"""Gradio demo: Chinese-to-English translation with pinyin annotation.

The full input is translated with facebook-research m2m-100 (1.2B), then
Baidu LAC segments it into words and each segment is translated and
transliterated individually.
"""
from pypinyin import pinyin
from transformers import M2M100Tokenizer, M2M100ForConditionalGeneration
from LAC import LAC
import gradio as gr
import torch

# Load the model/tokenizer once at startup; eval() disables dropout etc.
# since this app only runs inference.
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_1.2B")
model.eval()
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_1.2B")
lac = LAC(mode="seg")  # pure word-segmentation mode


def make_request(chinese_text):
    """Translate *chinese_text* from Chinese to English.

    Returns the list of decoded strings produced by ``batch_decode``
    (one entry for the single input sequence).
    """
    tokenizer.src_lang = "zh"
    with torch.no_grad():  # inference only — skip gradient bookkeeping
        encoded_zh = tokenizer(chinese_text, return_tensors="pt")
        generated_tokens = model.generate(
            **encoded_zh, forced_bos_token_id=tokenizer.get_lang_id("en")
        )
    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)


def generatepinyin(text):
    """Return the pinyin of *text* as a single string, each reading
    prefixed by a space (matching the original output format).
    """
    # pinyin() yields one [reading, ...] list per token; keep the first
    # (default) reading.  join() avoids quadratic += concatenation.
    return "".join(" " + piece[0] for piece in pinyin(text))


def generate_response(Chinese_to_translate):
    """Build ``[[text, translation, pinyin], ...]`` rows: first for the
    whole input, then one row per LAC-segmented word."""
    response = [
        [
            Chinese_to_translate,
            make_request(Chinese_to_translate),
            generatepinyin(Chinese_to_translate),
        ]
    ]
    for piece in lac.run(Chinese_to_translate):
        response.append([piece, make_request(piece), generatepinyin(piece)])
    return response


iface = gr.Interface(
    fn=generate_response,
    title="Chinese to English",
    description="Chinese to English with a state-of-the-art model (facebook-research m2m-100 1.2B)",
    # NOTE(review): gr.inputs.* is deprecated in Gradio >=3 (use gr.Textbox)
    # — confirm the installed Gradio version before migrating.
    inputs=gr.inputs.Textbox(lines=5, placeholder="Enter text in Chinese"),
    outputs="text",
)

# Guard the launch so importing this module does not start a web server.
if __name__ == "__main__":
    iface.launch()