import os

import deepl
import gradio as gr
import whisper
# load the base Whisper model once at startup
model = whisper.load_model("base")

# DeepL auth key, read from the Deepl_API environment variable
deepl_auth_key = os.environ["Deepl_API"]
def translate(text, target_lang):
    translator = deepl.Translator(deepl_auth_key)
    translated_text = translator.translate_text(text, target_lang=target_lang)
    # translate_text returns a TextResult; return the plain string
    return translated_text.text
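
# Usage sketch (hypothetical input; needs a valid key in Deepl_API):
#   translate("Hello, world", "JA")  # -> the Japanese translation as a str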

def transcribe(audio):
    # load audio and pad/trim it to fit 30 seconds
    audio = whisper.load_audio(audio)
    audio = whisper.pad_or_trim(audio)

    # make log-Mel spectrogram and move to the same device as the model
    mel = whisper.log_mel_spectrogram(audio).to(model.device)

    # detect the spoken language
    _, probs = model.detect_language(mel)
    detect_lang = max(probs, key=probs.get)
    print(f"Detected language: {detect_lang}")

    # decode the audio (fp16=False so decoding also works on CPU)
    options = whisper.DecodingOptions(fp16=False)
    result = whisper.decode(model, mel, options)
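
    # Note (optional tweak, not in the original): the detected language could be
    # pinned with whisper.DecodingOptions(fp16=False, language=detect_lang) so
    # decoding skips its own language detection pass.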

    # Disabled draft: branch on the detected language (en -> JA, ja -> EN-US) and
    # feed the translation into a text_to_speech step to generate a video.
    # if detect_lang == "en":
    #     translated_text = translate(result.text, "JA")
    #     generated_video = text_to_speech(translated_text)
    # elif detect_lang == "ja":
    #     translated_text = translate(result.text, "EN-US")

    # for now, always translate the transcript into Japanese
    translated_text = translate(result.text, "JA")
    return translated_text

title = 'Translator_Video'
inputs = gr.Video()
outputs = gr.Text()

interface = gr.Interface(title=title, fn=transcribe, inputs=inputs, outputs=outputs)
interface.launch(debug=True)
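
# Local run sketch (assumes openai-whisper, deepl, and gradio are pip-installed,
# ffmpeg is on PATH, and "app.py" is this file's name, which is an assumption):
#   Deepl_API=<your-key> python app.py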