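"""Gradio demo: translate English text to Hindi with NLLB-200 and narrate the
result with Bark (suno/bark-small), served as a two-tab Blocks app."""
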
from transformers import pipeline
import gradio as gr
import numpy as np
import torch

# Audio to text (currently disabled): speech-recognition front end.
# asr = pipeline(task="automatic-speech-recognition",
#                model="distil-whisper/distil-small.en")

# Text to text: English -> Hindi translation.
translator = pipeline(task="translation",
                      model="facebook/nllb-200-distilled-600M",
                      torch_dtype=torch.bfloat16)

# Text to audio: narrate the Hindi translation.
narrator = pipeline("text-to-speech", model="suno/bark-small")

                
demo = gr.Blocks()


def translate_and_narrate(text):
    """Translate English text to Hindi and return the narration as audio."""
    if not text:
        gr.Warning("No input found, please retry.")
        return None
    # If speech input is re-enabled: text = asr(audio_filepath)["text"]
    translation = translator(text,
                             src_lang="eng_Latn",
                             tgt_lang="hin_Deva")
    speech = narrator(translation[0]["translation_text"])
    # Gradio audio outputs expect a (sampling_rate, waveform) tuple.
    return speech["sampling_rate"], np.squeeze(speech["audio"])
    
mic_transcribe = gr.Interface(
    fn=translate_and_narrate,
    # Speech-input alternative (pairs with the disabled ASR pipeline above):
    # inputs=gr.Audio(sources="microphone", type="filepath"),
    inputs=gr.Textbox(label="English text", lines=3),
    outputs=gr.Audio(label="Translated Message"),
    allow_flagging="never")

file_transcribe = gr.Interface(
    fn=translate_and_narrate,
    # Speech-input alternative (pairs with the disabled ASR pipeline above):
    # inputs=gr.Audio(sources="upload", type="filepath"),
    inputs=gr.Textbox(label="English text", lines=3),
    outputs=gr.Audio(label="Translated Message"),
    allow_flagging="never"
)
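
# The two tabs come from the original speech-input design (microphone vs. file
# upload); with the ASR path disabled, both currently accept typed text.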
with demo:
    gr.TabbedInterface(
        [mic_transcribe,
         file_transcribe],
        ["Transcribe Microphone",
         "Transcribe Audio File"],
    )

demo.launch(share=True)
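# launch() blocks when run as a plain script; close() then frees the port on shutdown.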
demo.close()