cotxetj commited on
Commit
0668b67
1 Parent(s): 7d41558
Files changed (1)
  1. app.py +81 -0
app.py ADDED
@@ -0,0 +1,81 @@
+ import torch
+ from transformers import pipeline, VitsModel, VitsTokenizer
+ import numpy as np
+ import gradio as gr
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+ # Load Whisper-small
+ pipe = pipeline("automatic-speech-recognition",
+                 model="openai/whisper-small",
+                 device=device
+                 )
+
+ # Load the model checkpoint and tokenizer
+ # model = VitsModel.from_pretrained("Matthijs/mms-tts-fra")
+ # tokenizer = VitsTokenizer.from_pretrained("Matthijs/mms-tts-fra")
+ model = VitsModel.from_pretrained("facebook/mms-tts-fra")
+ tokenizer = VitsTokenizer.from_pretrained("facebook/mms-tts-fra")
+
+
+ # Translate the input audio to English text with Whisper
+ def translate(audio):
+     # Gradio's numpy audio arrives as a (sampling_rate, int16 data) tuple
+     if isinstance(audio, tuple):
+         sampling_rate, data = audio
+         audio = {"sampling_rate": sampling_rate,
+                  "raw": data.astype(np.float32) / 32768.0}
+     outputs = pipe(audio, max_new_tokens=256,
+                    generate_kwargs={"task": "translate"})  # Whisper's built-in X -> English translation
+     return outputs["text"]
+
+
+ # Define a function to generate the waveform output
+ def synthesise(text):
+     inputs = tokenizer(text, return_tensors="pt")
+     input_ids = inputs["input_ids"]
+
+     with torch.no_grad():
+         outputs = model(input_ids)
+
+     # VitsModel returns the generated audio in the `waveform` field
+     return outputs.waveform[0]
+
+
+ # Define the full speech-to-speech pipeline: translation followed by synthesis
+ def speech_to_speech_translation(audio):
+     translated_text = translate(audio)
+     synthesised_speech = synthesise(translated_text)
+     synthesised_speech = (
+         synthesised_speech.numpy() * 32767).astype(np.int16)
+     # MMS-TTS generates audio at 16 kHz
+     return 16000, synthesised_speech
+
+ def predict(transType, language, audio, audio_mic=None):
+     # Fall back to the microphone recording if no file was uploaded
+     if not audio and audio_mic is not None:
+         audio = audio_mic
+     if transType == "Text":
+         return translate(audio), None
+     if transType == "Audio":
+         return None, speech_to_speech_translation(audio)
+
+ # Define the title and description
+ title = "Swedish STSOT (Speech To Speech Or Text)"
+ description = "Use the pretrained Whisper model to convert Swedish audio to English (text or audio)"
+
+ supportLangs = ["Swedish", "French (in training)"]
+ transTypes = ["Text", "Audio"]
+
+ demo = gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Radio(label="Choose your output format", choices=transTypes),
+         gr.Radio(label="Choose a source language", choices=supportLangs, value="Swedish"),
+         gr.Audio(label="Upload an audio file", source="upload", type="numpy"),
+         gr.Audio(label="Record audio", source="microphone", type="numpy"),
+     ],
+     outputs=[
+         gr.Text(label="Translation"),
+         gr.Audio(label="Translated speech"),
+     ],
+     title=title,
+     description=description,
+     article="",
+     examples=[],
+ )
+
+
+ demo.launch()