ChandJain committed on
Commit b073d37
1 Parent(s): 02beba1

Delete app.py

Files changed (1)
  1. app.py +0 -61
app.py DELETED
@@ -1,61 +0,0 @@
- from transformers import pipeline
- import os
- import gradio as gr
- import torch
- from IPython.display import Audio as IPythonAudio
- from gtts import gTTS
- import IPython.display as ipd
-
- #Audio to text
- asr = pipeline(task="automatic-speech-recognition",
-                model="distil-whisper/distil-small.en")
- #Text to text
- translator = pipeline(task="translation",
-                       model="facebook/nllb-200-distilled-600M",
-                       torch_dtype=torch.bfloat16)
- #Text to audio
- pipe = pipeline("text-to-speech", model="suno/bark-small",
-                 torch_dtype=torch.bfloat16)
-
-
- demo = gr.Blocks()
- def transcribe_speech(filepath):
-     if filepath is None:
-         gr.Warning("No audio found, please retry.")
-         return ""
-     output = translator(asr(filepath)["text"],
-                         src_lang="eng_Latn",
-                         tgt_lang="hin_Deva")
-     narrated_text=pipe(output[0]['translation_text'])
-     #tts = gTTS(text=narrated_text, lang='hi', slow=False)
-     #tts.save("translated_audio.mp3")
-
-     #return ipd.Audio("translated_audio.mp3", autoplay=True)
-     return narrated_text
-
- mic_transcribe = gr.Interface(
-     fn=transcribe_speech,
-     inputs=gr.Audio(sources="microphone",
-                     type="filepath"),
-     #outputs="audio",
-     outputs=gr.Audio(label="Translated Message"),
-     allow_flagging="never")
-
- file_transcribe = gr.Interface(
-     fn=transcribe_speech,
-     inputs=gr.Audio(sources="upload",
-                     type="filepath"),
-     #outputs="audio",
-     outputs=gr.Audio(label="Translated Message"),
-     allow_flagging="never"
- )
- with demo:
-     gr.TabbedInterface(
-         [mic_transcribe,
-          file_transcribe],
-         ["Transcribe Microphone",
-          "Transcribe Audio File"],
-     )
-
- demo.launch(share=True)
- demo.close()
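
Note: the deleted script returns the raw output of the bark text-to-speech pipeline straight to a gr.Audio output, but gr.Audio expects a file path or a (sample_rate, numpy array) tuple rather than a dict. A minimal corrected sketch of the callback, assuming the transformers text-to-speech pipeline returns a dict with "audio" (shaped roughly (1, n_samples)) and "sampling_rate" keys, and reusing the asr, translator, and pipe objects created above:

import numpy as np

def transcribe_speech(filepath):
    if filepath is None:
        gr.Warning("No audio found, please retry.")
        return None
    # English speech -> English text
    text = asr(filepath)["text"]
    # English text -> Hindi text
    hindi = translator(text, src_lang="eng_Latn",
                       tgt_lang="hin_Deva")[0]["translation_text"]
    # Hindi text -> Hindi speech (dict with "audio" and "sampling_rate")
    speech = pipe(hindi)
    # gr.Audio accepts a (sample_rate, 1-D numpy array) tuple
    return speech["sampling_rate"], np.asarray(speech["audio"]).squeeze()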
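
Alternatively, the commented-out gTTS path could be completed by saving the Hindi translation to an mp3 and returning the file path, which gr.Audio also accepts. A sketch (the helper name narrate_with_gtts is only for illustration; note it must receive the translated text, not the bark pipeline output as the comment in the deleted code suggests):

from gtts import gTTS

def narrate_with_gtts(hindi_text):
    # Synthesize Hindi speech and write it to disk
    tts = gTTS(text=hindi_text, lang="hi", slow=False)
    tts.save("translated_audio.mp3")
    # Return the path for gr.Audio to play
    return "translated_audio.mp3"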