frogcho123 committed
Commit 292172d
1 Parent(s): 282ede3

Update app.py

Files changed (1)
  1. app.py +45 -45
app.py CHANGED
@@ -3,56 +3,56 @@ import os
 import whisper
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 from gtts import gTTS
-import numpy as np
+import IPython.display as ipd
 
-# Load models
-model_stt = whisper.load_model("base")
-model_translation = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
-tokenizer_translation = AutoTokenizer.from_pretrained("alirezamsh/small100")
-
-def speech_to_speech(input_audio, to_lang):
-    # Save the uploaded audio file
-    input_file = "input_audio" + os.path.splitext(input_audio.name)[1]
-    input_audio.save(input_file)
-
-    # Speech-to-Text (STT)
-    audio = whisper.load_audio(input_file)
+# Load Whisper STT model
+whisper_model = whisper.load_model("base")
+
+# Load translation models
+tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
+model = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")
+
+def translate_speech(audio_file, target_lang):
+    # Load audio
+    audio = whisper.load_audio(audio_file)
     audio = whisper.pad_or_trim(audio)
-    mel = whisper.log_mel_spectrogram(audio).to(model_stt.device)
-    _, probs = model_stt.detect_language(mel)
+    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
+
+    # Detect language
+    _, probs = whisper_model.detect_language(mel)
+    lang = max(probs, key=probs.get)
+
+    # Decode audio into text
     options = whisper.DecodingOptions()
-    result = whisper.decode(model_stt, mel, options)
+    result = whisper.decode(whisper_model, mel, options)
     text = result.text
-    lang = max(probs, key=probs.get)
 
-    # Translate
-    tokenizer_translation.src_lang = lang
-    tokenizer_translation.tgt_lang = to_lang
-    encoded_bg = tokenizer_translation(text, return_tensors="pt")
-    generated_tokens = model_translation.generate(**encoded_bg)
-    translated_text = tokenizer_translation.batch_decode(generated_tokens, skip_special_tokens=True)[0]
-
-    # Text-to-Speech (TTS)
-    tts = gTTS(text=translated_text, lang=to_lang)
-    output_file = "output_audio.mp3"
-    tts.save(output_file)
-
-    # Load output audio as numpy array
-    audio_np = np.array(output_file)
-
-    return audio_np
-
-languages = ["ru", "fr", "es", "de"]  # Example languages: Russian, French, Spanish, German
-file_input = gr.inputs.File(label="Upload Audio", accept="audio/*")
-dropdown = gr.inputs.Dropdown(languages, label="Translation Language")
-audio_output = gr.outputs.Audio(label="Translated Voice", type="numpy")
-
-gr.Interface(
-    fn=speech_to_speech,
-    inputs=[file_input, dropdown],
-    outputs=audio_output,
-    title="Speech-to-Speech Translator",
-    description="Upload an audio file (MP3, WAV, or FLAC) and choose the target language for translation.",
-    theme="default"
-).launch()
+    # Translate text
+    tokenizer.src_lang = lang
+    encoded_text = tokenizer(text, return_tensors="pt")
+    generated_tokens = model.generate(**encoded_text)
+    translated_text = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
+
+    # Text-to-speech (TTS)
+    tts = gTTS(text=translated_text, lang=target_lang)
+    audio_path = "translated_audio.mp3"
+    tts.save(audio_path)
+
+    return audio_path
+
+def translate_speech_interface(audio, target_lang):
+    audio_path = "recorded_audio.wav"
+    with open(audio_path, "wb") as f:
+        f.write(audio.read())
+
+    translated_audio = translate_speech(audio_path, target_lang)
+    translated_audio = open(translated_audio, "rb")
+
+    return translated_audio
+
+# Define the Gradio interface
+audio_recording = gr.inputs.Audio(source="microphone", type="wav", label="Record your speech")
+target_language = gr.inputs.Dropdown(["en", "ru", "fr"], label="Target Language")
+output_audio = gr.outputs.Audio(type="audio/mpeg", label="Translated Audio")
 
+gr.Interface(fn=translate_speech_interface, inputs=[audio_recording, target_language], outputs=output_audio, title="Speech Translator").launch()
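
The hunk covers only lines 3-58 of app.py, so `import gradio as gr` and `import os` are presumably the unshown lines 1-2 (both names are used below). For trying the new pipeline outside the Gradio UI, here is a minimal sketch of the speech-to-speech path as this commit wires it: Whisper for transcription and language detection, SMALL-100 for translation, gTTS for synthesis. Two things in it are assumptions, not part of the commit: `tokenizer.tgt_lang = target_lang` restores a line the old code had and the new code drops (without it, nothing tells the translator which language to produce), and `sample.wav` is a placeholder input path.

import whisper
from gtts import gTTS
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Same checkpoints the commit loads at module scope
whisper_model = whisper.load_model("base")
tokenizer = AutoTokenizer.from_pretrained("alirezamsh/small100")
model = AutoModelForSeq2SeqLM.from_pretrained("alirezamsh/small100")

def translate_speech(audio_file, target_lang):
    # Speech-to-text: pad/trim to 30 s, detect the spoken language, decode
    audio = whisper.pad_or_trim(whisper.load_audio(audio_file))
    mel = whisper.log_mel_spectrogram(audio).to(whisper_model.device)
    _, probs = whisper_model.detect_language(mel)
    lang = max(probs, key=probs.get)
    result = whisper.decode(whisper_model, mel, whisper.DecodingOptions())

    # Translate; tgt_lang is restored from the old code (see note above)
    tokenizer.src_lang = lang
    tokenizer.tgt_lang = target_lang
    encoded = tokenizer(result.text, return_tensors="pt")
    generated = model.generate(**encoded)
    translated = tokenizer.batch_decode(generated, skip_special_tokens=True)[0]

    # Text-to-speech: write the translation to an mp3 and return its path
    out_path = "translated_audio.mp3"
    gTTS(text=translated, lang=target_lang).save(out_path)
    return out_path

if __name__ == "__main__":
    # sample.wav is a stand-in path, not a file shipped with this Space
    print(translate_speech("sample.wav", "ru"))

Returning a file path also sidesteps the bug the commit removes: the old code passed `np.array(output_file)` to a numpy-typed Gradio output, which yields a 0-d array holding the filename string rather than audio samples.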