artificialguybr committed
Commit • bea60dd
1 Parent(s): c480a67
Create app.py
app.py ADDED
@@ -0,0 +1,75 @@
import gradio as gr
import subprocess
import whisper
from googletrans import Translator
import asyncio
import edge_tts

# Extract and Transcribe Audio
def extract_and_transcribe_audio(video_path):
    # Pass ffmpeg an argument list (no shell=True) so paths with spaces or quotes are safe
    ffmpeg_command = ["ffmpeg", "-i", video_path, "-acodec", "pcm_s24le",
                      "-ar", "48000", "-q:a", "0", "-map", "a", "-y", "output_audio.wav"]
    subprocess.run(ffmpeg_command, check=True)
    model = whisper.load_model("base")
    result = model.transcribe("output_audio.wav")  # Whisper also reports the detected language
    return result["text"], result["language"]

# Translate Text
def translate_text(whisper_text, whisper_language, target_language):
    # Map the UI label to the ISO 639-1 code googletrans expects
    language_mapping = {
        'English': 'en',
        'Spanish': 'es',
        # ... (other mappings)
    }
    target_language_code = language_mapping[target_language]
    translator = Translator()
    translated_text = translator.translate(whisper_text, src=whisper_language, dest=target_language_code).text
    return translated_text

# Generate Voice
async def generate_voice(translated_text, target_language):
    VOICE_MAPPING = {
        'English': 'en-GB-SoniaNeural',
        'Spanish': 'es-ES-PabloNeural',
        # ... (other mappings)
    }
    voice = VOICE_MAPPING[target_language]
    communicate = edge_tts.Communicate(translated_text, voice)
    # NOTE: edge-tts emits MP3-encoded audio by default, despite the .wav filename
    await communicate.save("output_synth.wav")
    return "output_synth.wav"

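# Tip (an assumption based on the edge-tts package, not part of this commit):
# when extending VOICE_MAPPING, the available voice names can be listed with
# the package's bundled CLI, e.g. `edge-tts --list-voices`.
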
# Generate Lip-synced Video (Placeholder)
def generate_lip_synced_video(video_path, output_audio_path):
    # Your lip-synced video generation code here
    # ...
    return "output_high_qual.mp4"

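# One possible way to fill in the placeholder above (a sketch, not the author's
# method): shell out to Wav2Lip's inference.py. The repo and checkpoint paths
# below are hypothetical and would need to exist locally.
#
#   subprocess.run([
#       "python", "Wav2Lip/inference.py",
#       "--checkpoint_path", "Wav2Lip/checkpoints/wav2lip_gan.pth",
#       "--face", video_path,
#       "--audio", output_audio_path,
#       "--outfile", "output_high_qual.mp4",
#   ], check=True)
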
# Main function to be called by Gradio
def process_video(video_path, target_language):
    # gr.Video hands the upload over as a filepath, so it can be used directly

    # Step 1: Extract and Transcribe Audio
    whisper_text, whisper_language = extract_and_transcribe_audio(video_path)

    # Step 2: Translate Text
    translated_text = translate_text(whisper_text, whisper_language, target_language)

    # Step 3: Generate Voice
    output_audio_path = asyncio.run(generate_voice(translated_text, target_language))

    # Step 4: Generate Lip-synced Video
    output_video_path = generate_lip_synced_video(video_path, output_audio_path)

    return output_video_path

# Gradio Interface
iface = gr.Interface(
    fn=process_video,
    # Component instances (rather than string shortcuts) let the dropdown choices be configured
    inputs=[gr.Video(), gr.Dropdown(choices=["English", "Spanish"], label="Target Language")],
    outputs=gr.Video(),
    live=False
)
iface.launch()
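For the Space to build, a requirements.txt would likely be needed alongside this file. A minimal sketch, with package names assumed from the imports above (the googletrans pin is the widely used release-candidate build, since the last stable release is known to break):

    gradio
    openai-whisper
    googletrans==4.0.0rc1
    edge-tts

The ffmpeg binary is not a pip package; on Hugging Face Spaces it can be installed by listing ffmpeg in a packages.txt file.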