hivecorp committed on
Commit
3cccab6
Β·
verified Β·
1 Parent(s): 2262b54

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -30
app.py CHANGED
@@ -8,49 +8,49 @@ import srt
8
  from pydub import AudioSegment, silence
9
  import datetime
10
 
11
- # πŸ“¦ Safe punkt download (for Hugging Face Spaces)
12
  nltk_data_path = os.path.join(os.path.expanduser("~"), "nltk_data")
13
  nltk.download("punkt", download_dir=nltk_data_path)
14
  nltk.data.path.append(nltk_data_path)
15
 
16
- # πŸ”Š Generate TTS audio
17
  async def text_to_speech(text, voice, rate, pitch):
18
  if not text.strip():
19
- return None, None, "Please enter text to convert."
20
  if not voice:
21
  return None, None, "Please select a voice."
22
 
23
- voice_short_name = voice.split(" - ")[0]
24
  rate_str = f"{rate:+d}%"
25
  pitch_str = f"{pitch:+d}Hz"
26
- communicate = edge_tts.Communicate(text, voice_short_name, rate=rate_str, pitch=pitch_str)
27
 
28
  with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
29
  tmp_path = tmp_file.name
30
  await communicate.save(tmp_path)
31
 
32
- # Generate SRT
33
  srt_path = generate_srt(tmp_path, text)
 
34
 
35
- return tmp_path, srt_path, None
36
-
37
- # 🧠 Generate SRT from audio + text
38
  def generate_srt(audio_path, text):
39
  audio = AudioSegment.from_file(audio_path)
40
- silence_ranges = silence.detect_silence(audio, min_silence_len=400, silence_thresh=audio.dBFS - 16)
41
- silence_ranges = [(start / 1000.0, end / 1000.0) for start, end in silence_ranges]
42
- sentences = nltk.tokenize.sent_tokenize(text)
43
 
 
44
  subtitles = []
45
  last_time = 0.0
 
46
  for i, sentence in enumerate(sentences):
47
- if i < len(silence_ranges):
48
  start = last_time
49
- end = silence_ranges[i][0]
50
- last_time = silence_ranges[i][1]
51
  else:
52
  start = last_time
53
- end = start + 2.5 # fallback timing
54
  subtitles.append(srt.Subtitle(
55
  index=i + 1,
56
  start=datetime.timedelta(seconds=start),
@@ -59,50 +59,49 @@ def generate_srt(audio_path, text):
59
  ))
60
 
61
  srt_data = srt.compose(subtitles)
62
- with tempfile.NamedTemporaryFile(delete=False, suffix=".srt", mode='w') as srt_file:
63
  srt_file.write(srt_data)
64
  return srt_file.name
65
 
66
- # πŸŽ›οΈ Interface wrapper
67
  async def tts_interface(text, voice, rate, pitch):
68
- audio, srt_file, warning = await text_to_speech(text, voice, rate, pitch)
69
- if warning:
70
- return None, None, gr.Warning(warning)
71
- return audio, srt_file, None
72
 
73
- # πŸ“‹ Setup Gradio UI
74
  async def create_demo():
75
  voices = await edge_tts.list_voices()
76
  voice_dict = {f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName'] for v in voices}
77
 
78
  with gr.Blocks() as demo:
79
- gr.Markdown("# πŸŽ™οΈ Edge TTS + Subtitle Generator (.srt)")
80
 
81
  with gr.Row():
82
  with gr.Column():
83
- text_input = gr.Textbox(label="Input Text", lines=5, placeholder="Enter your script here...")
84
- voice_dropdown = gr.Dropdown(choices=[""] + list(voice_dict.keys()), label="Select Voice", value="")
85
  rate_slider = gr.Slider(minimum=-50, maximum=50, value=0, label="Speech Rate (%)")
86
  pitch_slider = gr.Slider(minimum=-20, maximum=20, value=0, label="Pitch (Hz)")
87
  generate_btn = gr.Button("🎧 Generate Audio + SRT")
88
 
89
  with gr.Column():
90
  audio_output = gr.Audio(label="Generated Audio", type="filepath")
91
- srt_output = gr.File(label="Download .srt Subtitle")
92
- warning_output = gr.Markdown(visible=False)
93
 
94
  generate_btn.click(
95
  fn=tts_interface,
96
  inputs=[text_input, voice_dropdown, rate_slider, pitch_slider],
97
- outputs=[audio_output, srt_output, warning_output]
98
  )
99
 
100
  return demo
101
 
 
102
  async def main():
103
  demo = await create_demo()
104
  demo.queue()
105
- await demo.launch() # βœ… note: `await` is needed here
106
 
107
  if __name__ == "__main__":
108
  asyncio.run(main())
 
8
  from pydub import AudioSegment, silence
9
  import datetime
10
 
11
+ # πŸ“¦ Safe punkt download
12
  nltk_data_path = os.path.join(os.path.expanduser("~"), "nltk_data")
13
  nltk.download("punkt", download_dir=nltk_data_path)
14
  nltk.data.path.append(nltk_data_path)
15
 
16
+ # πŸ”Š Generate audio
17
async def text_to_speech(text, voice, rate, pitch):
    """Synthesize *text* with edge-tts and produce a matching .srt file.

    Returns a 3-tuple ``(audio_path, srt_path, message)``.  On validation
    failure both paths are ``None`` and *message* explains why; on success
    *message* is the empty string.
    """
    # Guard clauses: reject unusable input before touching the TTS backend.
    if not text.strip():
        return None, None, "Please enter some text."
    if not voice:
        return None, None, "Please select a voice."

    # Dropdown labels look like "ShortName - Locale (Gender)"; edge-tts
    # wants just the short name.
    short_name = voice.split(" - ")[0]
    tts = edge_tts.Communicate(
        text,
        short_name,
        rate=f"{rate:+d}%",     # edge-tts expects a signed percentage string
        pitch=f"{pitch:+d}Hz",  # ...and a signed Hz string
    )

    # delete=False so the file survives the context manager; Gradio serves
    # it by path afterwards.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
        audio_path = tmp_file.name
        await tts.save(audio_path)

    # Derive subtitles from the rendered audio + original script.
    subtitle_path = generate_srt(audio_path, text)
    return audio_path, subtitle_path, ""
35
 
36
+ # 🧠 Generate subtitles
 
 
37
  def generate_srt(audio_path, text):
38
  audio = AudioSegment.from_file(audio_path)
39
+ silences = silence.detect_silence(audio, min_silence_len=400, silence_thresh=audio.dBFS - 16)
40
+ silences = [(start / 1000.0, end / 1000.0) for start, end in silences]
 
41
 
42
+ sentences = nltk.tokenize.sent_tokenize(text)
43
  subtitles = []
44
  last_time = 0.0
45
+
46
  for i, sentence in enumerate(sentences):
47
+ if i < len(silences):
48
  start = last_time
49
+ end = silences[i][0]
50
+ last_time = silences[i][1]
51
  else:
52
  start = last_time
53
+ end = start + 2.5
54
  subtitles.append(srt.Subtitle(
55
  index=i + 1,
56
  start=datetime.timedelta(seconds=start),
 
59
  ))
60
 
61
  srt_data = srt.compose(subtitles)
62
+ with tempfile.NamedTemporaryFile(delete=False, suffix=".srt", mode="w") as srt_file:
63
  srt_file.write(srt_data)
64
  return srt_file.name
65
 
66
+ # Interface wrapper
67
async def tts_interface(text, voice, rate, pitch):
    """Gradio click handler: forward the inputs to text_to_speech unchanged.

    text_to_speech already returns the (audio_path, srt_path, message)
    triple in the order the outputs expect, so pass it straight through.
    """
    return await text_to_speech(text, voice, rate, pitch)
 
 
70
 
71
+ # UI setup
72
async def create_demo():
    """Build and return the Gradio Blocks UI.

    Voices are fetched from edge-tts up front so the dropdown can be
    populated before the interface is constructed.
    """
    available = await edge_tts.list_voices()
    # Map display labels ("ShortName - Locale (Gender)") to short names.
    labels = {
        f"{v['ShortName']} - {v['Locale']} ({v['Gender']})": v['ShortName']
        for v in available
    }

    with gr.Blocks() as demo:
        gr.Markdown("# πŸŽ™οΈ Text-to-Speech + Subtitle Generator")

        with gr.Row():
            with gr.Column():
                # Left column: inputs.
                text_input = gr.Textbox(label="Input Text", lines=5)
                voice_dropdown = gr.Dropdown(
                    choices=[""] + list(labels.keys()),
                    label="Select Voice",
                )
                rate_slider = gr.Slider(
                    minimum=-50, maximum=50, value=0, label="Speech Rate (%)"
                )
                pitch_slider = gr.Slider(
                    minimum=-20, maximum=20, value=0, label="Pitch (Hz)"
                )
                generate_btn = gr.Button("🎧 Generate Audio + SRT")

            with gr.Column():
                # Right column: outputs.
                audio_output = gr.Audio(label="Generated Audio", type="filepath")
                srt_output = gr.File(label="Download Subtitle (.srt)")
                message_output = gr.Textbox(label="Status", interactive=False)

        generate_btn.click(
            fn=tts_interface,
            inputs=[text_input, voice_dropdown, rate_slider, pitch_slider],
            outputs=[audio_output, srt_output, message_output],
        )

    return demo
99
 
100
+ # Entry point
101
async def main():
    """Async entry point: build the UI, enable request queuing, serve it."""
    app = await create_demo()
    app.queue()
    # NOTE(review): original author's comment said `await` is required on
    # launch() here — preserved as-is; confirm against the installed
    # Gradio version before changing.
    await app.launch()


if __name__ == "__main__":
    asyncio.run(main())