from pypinyin import pinyin
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
from LAC import LAC
import gradio as gr
import torch

# Load the MarianMT Chinese-to-English model and tokenizer, plus the LAC word segmenter.
model = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
model.eval()
tokenizer = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-zh-en")
lac = LAC(mode="seg")

def make_request(chinese_text):
    # Translate a Chinese string to English with the MarianMT model.
    # prepare_seq2seq_batch is deprecated; call the tokenizer directly instead.
    with torch.no_grad():
        encoded_zh = tokenizer([chinese_text], return_tensors="pt")
        generated_tokens = model.generate(**encoded_zh)
        return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

def generate_pinyin(text):
    # pypinyin returns a list of [syllable] lists; join the syllables into one string.
    return " ".join(piece[0] for piece in pinyin(text))

def generate_response(chinese_to_translate):
    # First row: the full input; following rows: each LAC-segmented word.
    response = [[chinese_to_translate,
                 make_request(chinese_to_translate),
                 generate_pinyin(chinese_to_translate)]]
    for word in lac.run(chinese_to_translate):
        response.append([word, make_request(word), generate_pinyin(word)])
    return response

iface = gr.Interface(
    fn=generate_response,
    title="Chinese to English",
    description="Chinese-to-English translation with Helsinki-NLP's opus-mt-zh-en model, with pinyin and a word-by-word breakdown. Fast, lightweight translations.",
    inputs=gr.Textbox(lines=5, placeholder="Enter text in Chinese"),
    outputs="text")

iface.launch()
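
# Illustrative usage sketch (an addition, not part of the original app): how the
# response rows look if generate_response is called directly, e.g. from a Python
# shell, instead of going through the Gradio UI.
#
#     rows = generate_response("我喜欢学习中文")   # "I like studying Chinese"
#     for text, translation, text_pinyin in rows:
#         print(text, "|", translation, "|", text_pinyin)
#
# The first row covers the full sentence; the remaining rows cover each segmented word.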