Update app.py
app.py CHANGED
@@ -1,25 +1,32 @@
 import gradio as gr
+import requests
+import random
+import urllib.parse
+import tempfile
+import os
 import nltk
 import srt
+from pydub import AudioSegment, silence
 import datetime

+nltk.download("punkt")

+NSFW_URL_TEMPLATE = os.getenv("NSFW_API_URL_TEMPLATE")
+TTS_URL_TEMPLATE = os.getenv("TTS_API_URL_TEMPLATE")

+if not NSFW_URL_TEMPLATE:
+    raise ValueError("Missing Secret: NSFW_API_URL_TEMPLATE is not set.")
+if not TTS_URL_TEMPLATE:
+    raise ValueError("Missing Secret: TTS_API_URL_TEMPLATE is not set.")

+VOICES = ["alloy", "echo", "fable", "onyx", "nova", "shimmer", "coral", "verse", "ballad", "ash", "sage", "amuch", "dan"]
+
-    audio = AudioSegment.from_file(audio_file)
-    silence_thresh = audio.dBFS - 16
-    silences = silence.detect_silence(audio, min_silence_len=400, silence_thresh=silence_thresh)
+def generate_srt(audio_path, script_text):
+    audio = AudioSegment.from_file(audio_path)
+    silences = silence.detect_silence(audio, min_silence_len=300, silence_thresh=audio.dBFS - 16)
+    silences = [(start / 1000.0, end / 1000.0) for start, end in silences]
     sentences = nltk.tokenize.sent_tokenize(script_text)

-    # Distribute timing across sentences based on silence gaps
     subtitles = []
     last_time = 0.0
     for i, sentence in enumerate(sentences):
@@ -29,55 +36,98 @@ def process_audio_and_script(audio_file, script_text):
             last_time = silences[i][1]
         else:
             start = last_time
-            end = start + 2.5 # default
+            end = start + 2.5 # default
+        subtitle = srt.Subtitle(
+            index=i + 1,
+            start=datetime.timedelta(seconds=start),
+            end=datetime.timedelta(seconds=end),
+            content=sentence
+        )
         subtitles.append(subtitle)

+    srt_data = srt.compose(subtitles)
+    with tempfile.NamedTemporaryFile(delete=False, suffix=".srt", mode='w') as srt_file:
+        srt_file.write(srt_data)
+    return srt_file.name
+
+def check_nsfw(prompt: str) -> bool:
+    encoded_prompt = urllib.parse.quote(prompt)
+    url = NSFW_URL_TEMPLATE.format(prompt=encoded_prompt)
+    try:
+        response = requests.get(url, timeout=20)
+        result = response.text.strip().upper()
+        return result == "YES"
+    except:
+        raise gr.Error("Failed to check prompt safety.")
+
+def generate_audio(prompt: str, voice: str, emotion: str, seed: int) -> bytes:
+    encoded_prompt = urllib.parse.quote(prompt)
+    encoded_emotion = urllib.parse.quote(emotion)
+    url = TTS_URL_TEMPLATE.format(prompt=encoded_prompt, emotion=encoded_emotion, voice=voice, seed=seed)
+    response = requests.get(url, timeout=60)
+    if 'audio' not in response.headers.get('content-type', ''):
+        raise gr.Error("Invalid audio response.")
+    return response.content
+
+def text_to_speech_app(prompt, voice, emotion, use_random_seed, specific_seed, subtitle_script):
+    if not prompt or not voice:
+        raise gr.Error("Prompt and Voice are required.")
+    seed = random.randint(0, 2**32 - 1) if use_random_seed else int(specific_seed)
+    is_nsfw = False # You can enable this by: is_nsfw = check_nsfw(prompt)

+    if is_nsfw:
+        return None, None, "Prompt is flagged NSFW"
+
+    try:
+        audio_bytes = generate_audio(prompt, voice, emotion, seed)
+        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as temp_audio:
+            temp_audio.write(audio_bytes)
+            audio_path = temp_audio.name
+
+        srt_path = None
+        if subtitle_script.strip():
+            srt_path = generate_srt(audio_path, subtitle_script)
+
+        return audio_path, srt_path, f"Audio and SRT generated with seed {seed}"
+    except Exception as e:
+        return None, None, f"Error: {str(e)}"
+
+def toggle_seed_input(use_random_seed):
+    return gr.update(visible=not use_random_seed, value=12345)
+
-# Interface
-with gr.Blocks() as demo:
-    gr.Markdown("### 🎙️ Audio to Timed Subtitle (SRT) Generator with Waveform")
+with gr.Blocks() as app:
+    gr.Markdown("## 🎙️ Advanced OpenAI TTS + Subtitle Generator")

     with gr.Row():
-            var wavesurfer = WaveSurfer.create({
-                container: '#waveform',
-                waveColor: '#999',
-                progressColor: '#333',
-                height: 100
-            });
-            wavesurfer.load("{audio_file}");
-        </script>
-        """
-        waveform = gr.HTML()
+        with gr.Column(scale=2):
+            prompt_input = gr.Textbox(label="Prompt", placeholder="Enter your text...")
+            emotion_input = gr.Textbox(label="Emotion Style", placeholder="happy, sad, excited, calm...")
+            voice_dropdown = gr.Dropdown(label="Voice", choices=VOICES, value="alloy")
+            subtitle_script = gr.Textbox(label="Subtitle Script", lines=6, placeholder="Paste script here for SRT generation")
+        with gr.Column(scale=1):
+            random_seed_checkbox = gr.Checkbox(label="Use Random Seed", value=True)
+            seed_input = gr.Number(label="Specific Seed", value=12345, visible=False, precision=0)
+
+    submit_button = gr.Button("🎧 Generate Audio + Subtitles", variant="primary")

     with gr.Row():
+        audio_output = gr.Audio(label="Generated Audio", type="filepath")
+        srt_output = gr.File(label="Download SRT File")
+        status_output = gr.Textbox(label="Status")
+
+    random_seed_checkbox.change(
+        fn=toggle_seed_input,
+        inputs=[random_seed_checkbox],
+        outputs=[seed_input]
+    )

+    submit_button.click(
+        fn=text_to_speech_app,
+        inputs=[prompt_input, voice_dropdown, emotion_input, random_seed_checkbox, seed_input, subtitle_script],
+        outputs=[audio_output, srt_output, status_output],
+        concurrency_limit=30
+    )

+if __name__ == "__main__":
+    if NSFW_URL_TEMPLATE and TTS_URL_TEMPLATE:
+        app.launch()
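
Both endpoints are read from Space secrets rather than hard-coded. The code only assumes they are str.format-style URL templates with {prompt}, {emotion}, {voice} and {seed} placeholders (the NSFW check uses {prompt} alone). Below is a minimal sketch of the expansion that generate_audio performs, with a purely hypothetical host standing in for the secret value:

import urllib.parse

# Hypothetical template value: the real TTS_API_URL_TEMPLATE lives in the
# Space's secrets and is not part of this commit.
TTS_URL_TEMPLATE = "https://tts.example.com/speak?text={prompt}&style={emotion}&voice={voice}&seed={seed}"

# Mirrors generate_audio(): prompt and emotion are URL-encoded before being
# substituted, while voice and seed are inserted verbatim.
url = TTS_URL_TEMPLATE.format(
    prompt=urllib.parse.quote("Hello there, welcome to the demo."),
    emotion=urllib.parse.quote("calm"),
    voice="alloy",
    seed=12345,
)
print(url)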
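Because generate_srt never calls the remote endpoints, the silence-based subtitle timing can be smoke-tested locally. This is a rough sketch under a few assumptions: ffmpeg is installed for pydub decoding, the NLTK punkt data can be fetched (newer NLTK releases may also ask for punkt_tab), the audio file and script text are placeholders, and throwaway env values are set only to get past the import-time secret check:

import os

# app.py raises at import time if the secrets are missing, so set throwaway
# values first; generate_srt() itself never contacts these endpoints.
os.environ.setdefault("NSFW_API_URL_TEMPLATE", "https://example.invalid/nsfw?q={prompt}")
os.environ.setdefault("TTS_API_URL_TEMPLATE", "https://example.invalid/tts?q={prompt}&e={emotion}&v={voice}&s={seed}")

from app import generate_srt  # importing also runs nltk.download("punkt")

# Hypothetical inputs: any audio file ffmpeg can decode, plus its narration script.
srt_path = generate_srt("narration.mp3", "First sentence. Second one. And a third.")
with open(srt_path) as f:
    print(f.read())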