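# Gradio demo: transcribe spoken English with wav2vec2, then translate the
# transcript to Vietnamese with an ONNX-exported seq2seq model.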
import gradio as gr
import librosa
from optimum.onnxruntime import ORTModelForSeq2SeqLM
from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
from transformers import AutoTokenizer
import torch

# load model and processor
processor = Wav2Vec2Processor.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")
model = Wav2Vec2ForCTC.from_pretrained("jonatasgrosman/wav2vec2-large-xlsr-53-english")

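# load the tokenizer and the ONNX-exported English-to-Vietnamese translation model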
tokenizer = AutoTokenizer.from_pretrained("icon-it-tdtu/mt-en-vi-optimum")
model_lm = ORTModelForSeq2SeqLM.from_pretrained("icon-it-tdtu/mt-en-vi-optimum")


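# Load an audio file and prepare 16 kHz inputs for the ASR model.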
def process_audio_file(file):
    # load at the native sampling rate (librosa would otherwise resample to 22,050 Hz)
    data, sr = librosa.load(file, sr=None)
    if sr != 16000:
        # wav2vec2 expects 16 kHz audio
        data = librosa.resample(data, orig_sr=sr, target_sr=16000)
    inputs = processor(data, sampling_rate=16000, return_tensors="pt", padding=True)
    return inputs


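# Transcribe the selected audio input, then translate the transcript.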
def interpret(micro, file):
    # prefer the uploaded file when both inputs are given
    if file is not None:
        input_audio = file
    elif micro is not None:
        input_audio = micro
    else:
        return "Please record or upload some audio first."
    inputs = process_audio_file(input_audio)
    with torch.no_grad():
        output_logit = model(inputs.input_values).logits
    pred_ids = torch.argmax(output_logit, dim=-1)
    text = processor.batch_decode(pred_ids)[0].lower()
    return translate(text)


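# Translate English text to Vietnamese with the ONNX seq2seq model.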
def translate(text):
    batch = tokenizer([text], return_tensors="pt")
    generated_ids = model_lm.generate(**batch)
    translated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return translated_text

# Build the Gradio interface: audio in (microphone or file), translated text out
iface = gr.Interface(
    fn=interpret,
    title="Interpret English to Vietnamese",
    description="A simple interface to interpret from spoken English to Vietnamese.",
    article="Author: <a href=\"https://huggingface.co/vumichien\">Vu Minh Chien</a>.", 
    inputs=[
        gr.Audio(source="microphone", type="filepath", streaming=False, label="Microphone"), 
        gr.Audio(source="upload", type="filepath", label="File"),
    ],
    outputs=gr.Textbox(label="Text output"),
    examples=[[None, "samples/sample-000000.mp3"], [None, "samples/sample-000001.mp3"]],
)
iface.launch(debug=True)