PlotweaverModel committed on
Commit 34ba0b7 · verified · 1 Parent(s): a25f155

File upload

Files changed (2):
  1. app.py +965 -0
  2. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,965 @@
+ """
+ Visual Storybook / Animated Audiobook Generator
+ Powered by Qwen3.5-Omni-Plus (narration) + HappyHorse 1.0 (video)
+
+ Features:
+ - English text input (paste, .txt, .pdf, .docx)
+ - Translates + narrates into 20 languages (preset or cloned voice)
+ - Generates an AI video per scene via HappyHorse 1.0
+ - Three video modes: text-to-video, image-to-video, or both (with auto scene prompts)
+ - Three HappyHorse API providers: fal.ai, happyhorse.app, DashScope
+ - Combines narrated audio + video scenes into a final MP4
+
+ Deploy as a Hugging Face Space:
+ 1. Create a new Space (SDK: Gradio)
+ 2. Upload app.py and requirements.txt
+ 3. Add secrets: DASHSCOPE_API_KEY (required for audio)
+    Plus one of: FAL_API_KEY, HAPPYHORSE_API_KEY, or use DashScope for video too
+ """
+
+ import os
+ import base64
+ import json
+ import pathlib
+ import shutil
+ import struct
+ import subprocess
+ import tempfile
+ import time
+ import re
+
+ import gradio as gr
+ import requests as http_requests
+ from openai import OpenAI
+
+ # Optional document parsers
+ try:
+     import pypdf
+     HAS_PYPDF = True
+ except ImportError:
+     HAS_PYPDF = False
+
+ try:
+     import docx
+     HAS_DOCX = True
+ except ImportError:
+     HAS_DOCX = False
+
+
+ # ==========================================
+ # CONFIGURATION
+ # ==========================================
+ OMNI_MODEL = "qwen3.5-omni-plus"
+ TTS_VC_MODEL = "qwen3-tts-vc-2026-01-22"
+ VOICE_CLONE_MODEL = "qwen-voice-enrollment"
+
+ DASHSCOPE_BASE_URL = "https://dashscope-intl.aliyuncs.com/compatible-mode/v1"
+ DASHSCOPE_API_URL = "https://dashscope-intl.aliyuncs.com/api/v1"
+ VOICE_CLONE_URL = f"{DASHSCOPE_API_URL}/services/audio/tts/customization"
+ TTS_SYNTHESIS_URL = f"{DASHSCOPE_API_URL}/services/aigc/multimodal-generation/generation"
+
+ # HappyHorse API endpoints per provider
+ HAPPYHORSE_PROVIDERS = {
+     "fal.ai": {
+         "t2v": "https://queue.fal.run/fal-ai/alibaba/happy-horse/text-to-video",
+         "i2v": "https://queue.fal.run/fal-ai/alibaba/happy-horse/image-to-video",
+         "status_base": "https://queue.fal.run/fal-ai/alibaba/happy-horse",
+         "key_env": "FAL_API_KEY",
+         "auth_header": "Authorization",
+         "auth_prefix": "Key ",
+     },
+     "happyhorse.app": {
+         "generate": "https://happyhorse.app/api/generate",
+         "status": "https://happyhorse.app/api/status",
+         "key_env": "HAPPYHORSE_API_KEY",
+         "auth_header": "Authorization",
+         "auth_prefix": "Bearer ",
+     },
+     "DashScope (Bailian)": {
+         "generate": f"{DASHSCOPE_API_URL}/services/aigc/video-generation/generation",
+         "status": f"{DASHSCOPE_API_URL}/tasks",
+         "key_env": "DASHSCOPE_API_KEY",
+         "auth_header": "Authorization",
+         "auth_prefix": "Bearer ",
+     },
+ }
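+ # How a provider entry is meant to be consumed (sketch; the generate_video_*
+ # functions below currently hardcode the same header values):
+ #   cfg = HAPPYHORSE_PROVIDERS["fal.ai"]
+ #   headers = {cfg["auth_header"]: cfg["auth_prefix"] + os.environ[cfg["key_env"]]}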
+
+ MAX_CHARS_PER_CHUNK = 1200  # Slightly shorter for scene-based splitting
+
+ LANGUAGES = {
+     "English": {"code": "en", "native": "English", "tier": "core"},
+     "Chinese (Mandarin)": {"code": "zh", "native": "Chinese", "tier": "core"},
+     "Japanese": {"code": "ja", "native": "Japanese", "tier": "core"},
+     "Korean": {"code": "ko", "native": "Korean", "tier": "core"},
+     "German": {"code": "de", "native": "Deutsch", "tier": "core"},
+     "French": {"code": "fr", "native": "Francais", "tier": "core"},
+     "Russian": {"code": "ru", "native": "Russian", "tier": "core"},
+     "Portuguese": {"code": "pt", "native": "Portugues", "tier": "core"},
+     "Spanish": {"code": "es", "native": "Espanol", "tier": "core"},
+     "Italian": {"code": "it", "native": "Italiano", "tier": "core"},
+     "Arabic": {"code": "ar", "native": "Arabic", "tier": "extended"},
+     "Dutch": {"code": "nl", "native": "Nederlands", "tier": "extended"},
+     "Polish": {"code": "pl", "native": "Polski", "tier": "extended"},
+     "Turkish": {"code": "tr", "native": "Turkce", "tier": "extended"},
+     "Vietnamese": {"code": "vi", "native": "Tieng Viet", "tier": "extended"},
+     "Thai": {"code": "th", "native": "Thai", "tier": "extended"},
+     "Indonesian": {"code": "id", "native": "Bahasa Indonesia", "tier": "extended"},
+     "Hindi": {"code": "hi", "native": "Hindi", "tier": "extended"},
+     "Swahili": {"code": "sw", "native": "Kiswahili", "tier": "extended"},
+     "Tamil": {"code": "ta", "native": "Tamil", "tier": "extended"},
+ }
+
+ VOICE_CLONE_LANGUAGES = {
+     "English", "Chinese (Mandarin)", "Japanese", "Korean", "German",
+     "French", "Russian", "Portuguese", "Spanish", "Italian",
+ }
+
+ PRESET_VOICES = [
+     "Cherry -- Sunny, friendly",
+     "Serena -- Gentle, soft",
+     "Jennifer -- Cinematic narrator",
+     "Katerina -- Mature, rich rhythm",
+     "Ethan -- Warm, energetic",
+     "Ryan -- Dramatic, rhythmic",
+     "Kai -- Soothing, calm",
+     "Neil -- Precise, clear",
+     "Lenn -- Rational, steady",
+     "Eldric Sage -- Authoritative narrator",
+     "Arthur -- Classic, mature",
+     "Bella -- Elegant, warm",
+ ]
+
+
+ def get_voice_name(label):
+     return label.split("--")[0].strip()
+
+
+ # ==========================================
+ # AUDIO HELPERS
+ # ==========================================
+ def base64_to_wav(b64_data, output_path):
+     """Wrap base64-encoded raw PCM (24 kHz, mono, 16-bit) in a RIFF/WAV header."""
+     audio_bytes = base64.b64decode(b64_data)
+     sr, nc, bps = 24000, 1, 16  # sample rate, channels, bits per sample
+     br = sr * nc * bps // 8     # byte rate
+     ba = nc * bps // 8          # block align
+     ds = len(audio_bytes)       # data chunk size
+     with open(output_path, "wb") as f:
+         f.write(b"RIFF")
+         f.write(struct.pack("<I", 36 + ds))   # RIFF chunk size
+         f.write(b"WAVE")
+         f.write(b"fmt ")
+         f.write(struct.pack("<I", 16))        # fmt chunk size
+         f.write(struct.pack("<H", 1))         # audio format: PCM
+         f.write(struct.pack("<H", nc))
+         f.write(struct.pack("<I", sr))
+         f.write(struct.pack("<I", br))
+         f.write(struct.pack("<H", ba))
+         f.write(struct.pack("<H", bps))
+         f.write(b"data")
+         f.write(struct.pack("<I", ds))
+         f.write(audio_bytes)
+
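+ # Usage sketch, assuming `b64` holds base64 raw 24 kHz mono 16-bit PCM as
+ # returned by narrate_scene_preset() below (ffprobe must be on PATH):
+ #   base64_to_wav(b64, "scene.wav")
+ #   print(get_audio_duration("scene.wav"))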
+
+ def get_audio_duration(filepath):
+     result = subprocess.run(
+         ["ffprobe", "-v", "quiet", "-show_entries", "format=duration",
+          "-of", "default=noprint_wrappers=1:nokey=1", filepath],
+         capture_output=True, text=True,
+     )
+     return float(result.stdout.strip())
+
+
+ def concatenate_media(files, output_path, media_type="audio"):
+     if not files:
+         return
+     if len(files) == 1:
+         shutil.copy2(files[0], output_path)
+         return
+     list_file = output_path + ".txt"
+     with open(list_file, "w") as f:
+         for fp in files:
+             f.write(f"file '{fp}'\n")
+     subprocess.run(
+         ["ffmpeg", "-y", "-f", "concat", "-safe", "0",
+          "-i", list_file, "-c", "copy", output_path],
+         capture_output=True, check=True,
+     )
+     os.remove(list_file)
+
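+ # The concat list file written above looks like (paths illustrative):
+ #   file '/tmp/storybook_abc/scene_000_final.mp4'
+ #   file '/tmp/storybook_abc/scene_001_final.mp4'
+ # "-safe 0" permits absolute paths; "-c copy" concatenates without re-encoding,
+ # which is why concatenate_videos() below normalizes streams first.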
+
+ def generate_silence(duration_sec, output_path):
+     subprocess.run(
+         ["ffmpeg", "-y", "-f", "lavfi", "-i", "anullsrc=r=24000:cl=mono",
+          "-t", str(duration_sec), "-acodec", "pcm_s16le", output_path],
+         capture_output=True, check=True,
+     )
+
+
+ # ==========================================
+ # DOCUMENT EXTRACTION
+ # ==========================================
+ def extract_text_from_file(filepath):
+     ext = os.path.splitext(filepath)[1].lower()
+     if ext == ".pdf":
+         if not HAS_PYPDF:
+             raise gr.Error("pypdf not installed.")
+         reader = pypdf.PdfReader(filepath)
+         # extract_text() can return None/empty for image-only pages; call it once per page
+         pages = (p.extract_text() or "" for p in reader.pages)
+         return "\n\n".join(t.strip() for t in pages if t.strip())
+     elif ext in (".docx", ".doc"):
+         if ext == ".doc":
+             raise gr.Error("Please save as .docx or .pdf.")
+         if not HAS_DOCX:
+             raise gr.Error("python-docx not installed.")
+         doc = docx.Document(filepath)
+         return "\n\n".join(p.text.strip() for p in doc.paragraphs if p.text.strip())
+     else:
+         with open(filepath, "r", encoding="utf-8", errors="replace") as f:
+             return f.read()
+
+
+ # ==========================================
+ # TEXT SPLITTING INTO SCENES
+ # ==========================================
+ def split_into_scenes(text, max_chars=MAX_CHARS_PER_CHUNK):
+     text = text.strip()
+     if not text:
+         return []
+     if len(text) <= max_chars:
+         return [text]
+
+     chunks = []
+     paragraphs = re.split(r"\n\s*\n", text)
+     current = ""
+
+     for para in paragraphs:
+         para = para.strip()
+         if not para:
+             continue
+         if len(current) + len(para) + 2 <= max_chars:
+             current = (current + "\n\n" + para).strip()
+         else:
+             if current:
+                 chunks.append(current)
+             if len(para) > max_chars:
+                 # Oversized paragraph: fall back to sentence-level packing
+                 sentences = re.split(r"(?<=[.!?])\s+", para)
+                 current = ""
+                 for s in sentences:
+                     if len(current) + len(s) + 1 <= max_chars:
+                         current = (current + " " + s).strip()
+                     else:
+                         if current:
+                             chunks.append(current)
+                         current = s
+             else:
+                 current = para
+
+     if current:
+         chunks.append(current)
+     return chunks
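+ # Packing sketch: paragraphs are packed greedily up to max_chars, preferring
+ # paragraph boundaries and falling back to sentence boundaries, so a ~3000-char
+ # chapter yields roughly three ~1200-char scenes.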
+
+
+ # ==========================================
+ # SCENE PROMPT GENERATION (via Qwen)
+ # ==========================================
+ def generate_scene_prompt(client, scene_text, scene_index):
+     """Use Qwen to create a cinematic video prompt from story text."""
+     try:
+         response = client.chat.completions.create(
+             model=OMNI_MODEL,
+             modalities=["text"],
+             messages=[
+                 {
+                     "role": "system",
+                     "content": (
+                         "You are a cinematic scene director. Given a passage from a story, "
+                         "create a single vivid video generation prompt (max 200 words) that "
+                         "describes the visual scene. Include: setting, lighting, camera angle, "
+                         "character actions, mood. Do NOT include dialogue or text overlays. "
+                         "Output ONLY the video prompt, nothing else."
+                     ),
+                 },
+                 {
+                     "role": "user",
+                     "content": f"Create a cinematic video prompt for this scene:\n\n{scene_text[:1500]}",
+                 },
+             ],
+         )
+         return response.choices[0].message.content.strip()
+     except Exception:
+         # Fallback: use the first 200 chars of the scene as the prompt
+         return scene_text[:200]
+
+
+ # ==========================================
+ # VOICE CLONING
+ # ==========================================
+ def prepare_clone_audio(audio_path):
+     """Resample a voice sample to 24 kHz mono WAV, trimmed to at most 60 seconds."""
+     duration = get_audio_duration(audio_path)
+
+     if duration < 10:
+         raise ValueError(f"Audio too short ({duration:.1f}s). Need at least 10 seconds.")
+
+     tmp_prepared = audio_path + "_prepared.wav"
+     if duration <= 60:
+         subprocess.run(
+             ["ffmpeg", "-y", "-i", audio_path, "-ar", "24000", "-ac", "1",
+              "-acodec", "pcm_s16le", tmp_prepared],
+             capture_output=True, check=True,
+         )
+     else:
+         # Skip the first few seconds (often silence) and keep a 60s window
+         start = min(5, duration - 60)
+         subprocess.run(
+             ["ffmpeg", "-y", "-ss", str(start), "-t", "60", "-i", audio_path,
+              "-ar", "24000", "-ac", "1", "-acodec", "pcm_s16le", tmp_prepared],
+             capture_output=True, check=True,
+         )
+     return tmp_prepared
+
+
+ def clone_voice(audio_path, api_key):
+     """Enroll a cloned voice via DashScope; returns the new voice id."""
+     prepared = prepare_clone_audio(audio_path)
+     b64 = base64.b64encode(pathlib.Path(prepared).read_bytes()).decode()
+     try:
+         os.remove(prepared)
+     except OSError:
+         pass
+
+     resp = http_requests.post(VOICE_CLONE_URL, json={
+         "model": VOICE_CLONE_MODEL,
+         "input": {
+             "action": "create",
+             "target_model": TTS_VC_MODEL,
+             "preferred_name": "storybook_voice",
+             "audio": {"data": f"data:audio/wav;base64,{b64}"},
+         },
+     }, headers={
+         "Authorization": f"Bearer {api_key}",
+         "Content-Type": "application/json",
+     }, timeout=60)
+
+     if resp.status_code != 200:
+         raise RuntimeError(f"Voice clone failed: {resp.text[:300]}")
+     return resp.json()["output"]["voice"]
+
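+ # Round trip (voice id format is illustrative, not guaranteed by the API):
+ #   voice_id = clone_voice("sample.wav", ds_key)   # e.g. "storybook_voice-xxxx"
+ #   narrate_scene_cloned() then sends voice_id as input.voice to TTS_SYNTHESIS_URL.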
+
+ # ==========================================
+ # AUDIO NARRATION
+ # ==========================================
+ def narrate_scene_preset(client, text, voice, language, lang_config, translate):
+     """Narrate with a preset voice via Qwen3.5-Omni-Plus. Returns (audio_b64, transcript)."""
+     if translate and language != "English":
+         sys_prompt = (
+             f"You are a professional audiobook narrator. "
+             f"Translate the English text into {language} ({lang_config['native']}) "
+             f"and narrate it expressively. Respond ONLY with the spoken {language} narration."
+         )
+         user_text = f"Translate into {language} and narrate:\n\n{text}"
+     else:
+         sys_prompt = "You are a professional audiobook narrator. Read expressively. Respond ONLY with narration."
+         user_text = f"Narrate:\n\n{text}"
+
+     completion = client.chat.completions.create(
+         model=OMNI_MODEL,
+         messages=[
+             {"role": "system", "content": sys_prompt},
+             {"role": "user", "content": user_text},
+         ],
+         modalities=["text", "audio"],
+         audio={"voice": voice, "format": "wav"},
+         stream=True,
+         stream_options={"include_usage": True},
+     )
+
+     audio_parts, text_parts = [], []
+     for event in completion:
+         if not event.choices:
+             continue
+         delta = event.choices[0].delta
+         if hasattr(delta, "content") and delta.content:
+             text_parts.append(delta.content)
+         # Audio deltas may arrive as a dict or an object, depending on client version
+         if hasattr(delta, "audio") and delta.audio:
+             if isinstance(delta.audio, dict) and "data" in delta.audio:
+                 audio_parts.append(delta.audio["data"])
+             elif hasattr(delta.audio, "data") and delta.audio.data:
+                 audio_parts.append(delta.audio.data)
+
+     return "".join(audio_parts), "".join(text_parts)
+
+
+ def narrate_scene_cloned(client, text, voice_id, language, lang_config, translate, api_key):
+     """Translate, then synthesize with the cloned voice. Returns (audio_url, transcript)."""
+     final_text = text
+     if translate and language != "English":
+         resp = client.chat.completions.create(
+             model=OMNI_MODEL, modalities=["text"],
+             messages=[
+                 {"role": "system", "content": f"Translate English to {language}. Output ONLY the translation."},
+                 {"role": "user", "content": f"Translate:\n\n{text}"},
+             ],
+         )
+         final_text = resp.choices[0].message.content.strip()
+
+     lang_map = {
+         "English": "English", "Chinese (Mandarin)": "Chinese", "Japanese": "Japanese",
+         "Korean": "Korean", "German": "German", "French": "French",
+         "Russian": "Russian", "Portuguese": "Portuguese", "Spanish": "Spanish", "Italian": "Italian",
+     }
+
+     resp = http_requests.post(TTS_SYNTHESIS_URL, json={
+         "model": TTS_VC_MODEL,
+         "input": {"text": final_text, "voice": voice_id, "language_type": lang_map.get(language, "English")},
+     }, headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}, timeout=120)
+
+     if resp.status_code != 200:
+         return None, final_text
+
+     audio_url = resp.json().get("output", {}).get("audio", {}).get("url")
+     return audio_url, final_text
+
+
+ # ==========================================
+ # HAPPYHORSE VIDEO GENERATION
+ # ==========================================
+ def generate_video_fal(prompt, api_key, duration=5, aspect="16:9", image_url=None):
+     """Generate video via fal.ai."""
+     headers = {"Authorization": f"Key {api_key}", "Content-Type": "application/json"}
+
+     if image_url:
+         payload = {"image_url": image_url, "prompt": prompt, "duration": duration, "aspect_ratio": aspect}
+         url = HAPPYHORSE_PROVIDERS["fal.ai"]["i2v"]
+     else:
+         payload = {"prompt": prompt, "duration": duration, "aspect_ratio": aspect}
+         url = HAPPYHORSE_PROVIDERS["fal.ai"]["t2v"]
+
+     resp = http_requests.post(url, json=payload, headers=headers, timeout=30)
+     if resp.status_code not in (200, 201):
+         raise RuntimeError(f"fal.ai submit failed: {resp.status_code} {resp.text[:200]}")
+
+     data = resp.json()
+     # fal uses a queue -- poll the request under the base app path, not the sub-endpoint
+     request_id = data.get("request_id")
+     if request_id:
+         base = HAPPYHORSE_PROVIDERS["fal.ai"]["status_base"]
+         status_url = f"{base}/requests/{request_id}/status"
+         result_url = f"{base}/requests/{request_id}"
+         for _ in range(120):
+             time.sleep(10)
+             s = http_requests.get(status_url, headers=headers, timeout=30).json()
+             if s.get("status") == "COMPLETED":
+                 r = http_requests.get(result_url, headers=headers, timeout=30).json()
+                 return r.get("video", {}).get("url")
+             elif s.get("status") == "FAILED":
+                 raise RuntimeError("fal.ai generation failed")
+         raise RuntimeError("fal.ai timeout")
+     else:
+         return data.get("video", {}).get("url")
+
+
+ def generate_video_happyhorse_app(prompt, api_key, duration=5, aspect="16:9", image_url=None):
+     """Generate video via happyhorse.app."""
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+     payload = {
+         "model": "happyhorse-1.0/video",
+         "prompt": prompt,
+         "mode": "pro",
+         "duration": duration,
+         "aspect_ratio": aspect,
+         "sound": False,
+     }
+     if image_url:
+         payload["image_urls"] = [image_url]
+
+     resp = http_requests.post(
+         HAPPYHORSE_PROVIDERS["happyhorse.app"]["generate"],
+         json=payload, headers=headers, timeout=30,
+     )
+     if resp.status_code != 200:
+         raise RuntimeError(f"happyhorse.app submit failed: {resp.text[:200]}")
+
+     task_id = resp.json().get("data", {}).get("task_id")
+     if not task_id:
+         raise RuntimeError("No task_id returned")
+
+     status_url = HAPPYHORSE_PROVIDERS["happyhorse.app"]["status"]
+     for _ in range(120):
+         time.sleep(10)
+         s = http_requests.get(f"{status_url}?task_id={task_id}", headers=headers, timeout=30).json()
+         status = s.get("data", {}).get("status", "")
+         if status == "SUCCESS":
+             urls = s.get("data", {}).get("response", {}).get("resultUrls", [])
+             return urls[0] if urls else None
+         elif status == "FAILED":
+             raise RuntimeError(f"happyhorse.app failed: {s.get('data', {}).get('error_message', '')}")
+     raise RuntimeError("happyhorse.app timeout")
+
+
+ def generate_video_dashscope(prompt, api_key, duration=5, aspect="16:9", image_url=None):
+     """Generate video via DashScope Bailian (async task API)."""
+     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json", "X-DashScope-Async": "enable"}
+     payload = {
+         "model": "happyhorse-1.0",
+         "input": {"prompt": prompt},
+         "parameters": {"duration": duration, "aspect_ratio": aspect},
+     }
+     if image_url:
+         payload["input"]["image_url"] = image_url
+
+     resp = http_requests.post(
+         HAPPYHORSE_PROVIDERS["DashScope (Bailian)"]["generate"],
+         json=payload, headers=headers, timeout=30,
+     )
+     if resp.status_code != 200:
+         raise RuntimeError(f"DashScope submit failed: {resp.text[:200]}")
+
+     task_id = resp.json().get("output", {}).get("task_id")
+     if not task_id:
+         raise RuntimeError("No task_id returned from DashScope")
+
+     for _ in range(120):
+         time.sleep(10)
+         s = http_requests.get(
+             f"{HAPPYHORSE_PROVIDERS['DashScope (Bailian)']['status']}/{task_id}",
+             headers={"Authorization": f"Bearer {api_key}"}, timeout=30,
+         ).json()
+         status = s.get("output", {}).get("task_status", "")
+         if status == "SUCCEEDED":
+             results = s.get("output", {}).get("results", [])
+             return results[0].get("url") if results else None
+         elif status == "FAILED":
+             raise RuntimeError(f"DashScope failed: {s.get('output', {}).get('message', '')}")
+     raise RuntimeError("DashScope timeout")
+
+
+ def generate_video(prompt, provider, api_key, duration=5, aspect="16:9", image_url=None):
+     """Route to the correct provider."""
+     if provider == "fal.ai":
+         return generate_video_fal(prompt, api_key, duration, aspect, image_url)
+     elif provider == "happyhorse.app":
+         return generate_video_happyhorse_app(prompt, api_key, duration, aspect, image_url)
+     elif provider == "DashScope (Bailian)":
+         return generate_video_dashscope(prompt, api_key, duration, aspect, image_url)
+     else:
+         raise ValueError(f"Unknown provider: {provider}")
+
+
+ def download_file(url, output_path):
+     """Download a remote file (video or audio) to output_path."""
+     resp = http_requests.get(url, timeout=120, stream=True)
+     if resp.status_code != 200:
+         raise RuntimeError(f"Failed to download file: {resp.status_code}")
+     with open(output_path, "wb") as f:
+         for chunk in resp.iter_content(chunk_size=8192):
+             f.write(chunk)
+     return output_path
+
+
+ def image_to_data_uri(img_path):
+     """Encode a local image as a data URI (some providers may require a public URL instead)."""
+     img_b64 = base64.b64encode(pathlib.Path(img_path).read_bytes()).decode()
+     ext = pathlib.Path(img_path).suffix.lower()
+     mime = {".jpg": "image/jpeg", ".jpeg": "image/jpeg", ".png": "image/png",
+             ".webp": "image/webp"}.get(ext, "image/jpeg")
+     return f"data:{mime};base64,{img_b64}"
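+ # Usage sketch (values illustrative; each call blocks while polling, up to ~20 min):
+ #   url = generate_video("a lighthouse at dawn", "fal.ai", os.environ["FAL_API_KEY"])
+ #   download_file(url, "scene.mp4")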
+
+
+ # ==========================================
+ # VIDEO + AUDIO COMPOSITING
+ # ==========================================
+ def create_scene_video(video_path, audio_path, output_path):
+     """Combine a video clip with audio, looping/trimming the video to match the audio duration."""
+     audio_dur = get_audio_duration(audio_path)
+
+     # "-stream_loop -1" repeats the clip indefinitely; "-t" then cuts the output
+     # at the audio duration ("-shortest" is a belt-and-braces guard)
+     subprocess.run([
+         "ffmpeg", "-y",
+         "-stream_loop", "-1", "-i", video_path,
+         "-i", audio_path,
+         "-map", "0:v:0", "-map", "1:a:0",
+         "-c:v", "libx264", "-preset", "fast", "-crf", "23",
+         "-c:a", "aac", "-b:a", "128k",
+         "-t", str(audio_dur),
+         "-shortest",
+         output_path,
+     ], capture_output=True, check=True)
+     return output_path
+
+
+ def concatenate_videos(video_files, output_path):
+     """Concatenate multiple MP4 files."""
+     if not video_files:
+         return
+     if len(video_files) == 1:
+         shutil.copy2(video_files[0], output_path)
+         return
+
+     # Re-encode to ensure compatible streams before the lossless concat
+     normalized = []
+     for i, vf in enumerate(video_files):
+         norm = vf.replace(".mp4", f"_norm_{i}.mp4")
+         subprocess.run([
+             "ffmpeg", "-y", "-i", vf,
+             "-c:v", "libx264", "-preset", "fast", "-crf", "23",
+             "-c:a", "aac", "-b:a", "128k",
+             "-ar", "24000", "-ac", "1",
+             "-r", "24",
+             "-s", "1280x720",
+             norm,
+         ], capture_output=True, check=True)
+         normalized.append(norm)
+
+     list_file = output_path + ".txt"
+     with open(list_file, "w") as f:
+         for vf in normalized:
+             f.write(f"file '{vf}'\n")
+
+     subprocess.run([
+         "ffmpeg", "-y", "-f", "concat", "-safe", "0",
+         "-i", list_file, "-c", "copy", output_path,
+     ], capture_output=True, check=True)
+     os.remove(list_file)
+
+
+ # ==========================================
+ # MAIN PIPELINE
+ # ==========================================
+ def generate_storybook(
+     text_input, file_input, target_language, voice_mode, preset_voice_label,
+     clone_audio, video_provider, video_mode, scene_images, aspect_ratio,
+     video_duration, progress=gr.Progress(),
+ ):
+     # -- Resolve text --
+     if file_input is not None:
+         progress(0.01, desc="Extracting text...")
+         text = extract_text_from_file(file_input)
+     elif text_input and text_input.strip():
+         text = text_input.strip()
+     else:
+         raise gr.Error("Please provide text or upload a file.")
+
+     if len(text) < 20:
+         raise gr.Error("Text is too short for a storybook.")
+
+     # -- API keys --
+     ds_key = os.environ.get("DASHSCOPE_API_KEY", "")
+     if not ds_key:
+         raise gr.Error("DASHSCOPE_API_KEY not set (needed for audio narration).")
+
+     provider_config = HAPPYHORSE_PROVIDERS[video_provider]
+     video_key = os.environ.get(provider_config["key_env"], "")
+     if not video_key:
+         raise gr.Error(f"{provider_config['key_env']} not set. Add it in Settings > Secrets.")
+
+     lang_config = LANGUAGES[target_language]
+     use_clone = voice_mode == "Clone a Voice"
+     translate = target_language != "English"
+     client = OpenAI(api_key=ds_key, base_url=DASHSCOPE_BASE_URL)
+     tmp_dir = tempfile.mkdtemp(prefix="storybook_")
+
+     # -- Voice cloning --
+     cloned_voice_id = None
+     if use_clone:
+         if clone_audio is None:
+             raise gr.Error("Upload a voice sample for cloning.")
+         if target_language not in VOICE_CLONE_LANGUAGES:
+             raise gr.Error(f"Voice cloning only supports: {', '.join(sorted(VOICE_CLONE_LANGUAGES))}")
+         progress(0.03, desc="Cloning voice...")
+         cloned_voice_id = clone_voice(clone_audio, ds_key)
+
+     # -- Parse scene images if provided --
+     image_list = []
+     if scene_images:
+         image_list = scene_images if isinstance(scene_images, list) else [scene_images]
+
+     try:
+         # -- Split into scenes --
+         progress(0.05, desc="Splitting text into scenes...")
+         scenes = split_into_scenes(text)
+         total = len(scenes)
+         scene_videos = []
+         all_transcripts = []
+
+         for i, scene_text in enumerate(scenes):
+             base_frac = 0.08 + 0.85 * (i / total)
+
+             # --- Step 1: Generate video prompt ---
+             progress(base_frac, desc=f"Scene {i+1}/{total}: Creating video prompt...")
+             video_prompt = generate_scene_prompt(client, scene_text, i)
+             if video_mode == "Text-to-Video (auto scene prompts)":
+                 image_url = None
+             elif video_mode == "Image-to-Video (animate uploaded images)":
+                 # Use the matching image if available, otherwise cycle through.
+                 # Note: some providers need a public URL; we send a base64 data URI
+                 if image_list:
+                     image_url = image_to_data_uri(image_list[i % len(image_list)])
+                 else:
+                     image_url = None
+             else:  # Both: images where available, text for the rest
+                 if image_list and i < len(image_list):
+                     image_url = image_to_data_uri(image_list[i])
+                 else:
+                     image_url = None
+
+             # --- Step 2: Generate video ---
+             progress(base_frac + 0.02, desc=f"Scene {i+1}/{total}: Generating video...")
+             try:
+                 video_url = generate_video(
+                     video_prompt, video_provider, video_key,
+                     duration=video_duration, aspect=aspect_ratio, image_url=image_url,
+                 )
+                 scene_video_path = os.path.join(tmp_dir, f"scene_{i:03d}_video.mp4")
+                 if video_url:
+                     download_file(video_url, scene_video_path)
+                 else:
+                     raise RuntimeError("No video URL returned")
+             except Exception as e:
+                 # Fall back to a black placeholder video for this scene
+                 scene_video_path = os.path.join(tmp_dir, f"scene_{i:03d}_black.mp4")
+                 subprocess.run([
+                     "ffmpeg", "-y", "-f", "lavfi", "-i",
+                     f"color=c=black:s=1280x720:d={video_duration}:r=24",
+                     "-c:v", "libx264", "-preset", "fast",
+                     scene_video_path,
+                 ], capture_output=True, check=True)
+                 all_transcripts.append(f"Scene {i+1} video failed: {str(e)[:100]}")
+
+             # --- Step 3: Generate narration audio ---
+             progress(base_frac + 0.04, desc=f"Scene {i+1}/{total}: Narrating...")
+             scene_audio_path = os.path.join(tmp_dir, f"scene_{i:03d}_audio.wav")
+
+             try:
+                 if use_clone:
+                     audio_url, transcript = narrate_scene_cloned(
+                         client, scene_text, cloned_voice_id, target_language, lang_config, translate, ds_key,
+                     )
+                     if audio_url:
+                         download_file(audio_url, scene_audio_path)
+                     else:
+                         generate_silence(5, scene_audio_path)
+                 else:
+                     voice = get_voice_name(preset_voice_label)
+                     audio_b64, transcript = narrate_scene_preset(
+                         client, scene_text, voice, target_language, lang_config, translate,
+                     )
+                     if audio_b64:
+                         base64_to_wav(audio_b64, scene_audio_path)
+                     else:
+                         generate_silence(5, scene_audio_path)
+                         transcript = "No audio generated"
+
+                 if transcript:
+                     all_transcripts.append(f"**Scene {i+1}:** {transcript[:200]}...")
+             except Exception as e:
+                 generate_silence(5, scene_audio_path)
+                 all_transcripts.append(f"Scene {i+1} narration failed: {str(e)[:100]}")
+
+             # --- Step 4: Combine video + audio for this scene ---
+             progress(base_frac + 0.06, desc=f"Scene {i+1}/{total}: Compositing...")
+             scene_final = os.path.join(tmp_dir, f"scene_{i:03d}_final.mp4")
+             try:
+                 create_scene_video(scene_video_path, scene_audio_path, scene_final)
+                 scene_videos.append(scene_final)
+             except Exception as e:
+                 all_transcripts.append(f"Scene {i+1} compositing failed: {str(e)[:100]}")
+
+         if not scene_videos:
+             raise gr.Error("No scenes were successfully generated.")
+
+         # -- Final assembly --
+         progress(0.95, desc="Assembling final storybook video...")
+         final_video = os.path.join(tmp_dir, "storybook.mp4")
+         concatenate_videos(scene_videos, final_video)
+
+         progress(1.0, desc="Done!")
+
+         # Stats
+         video_size = os.path.getsize(final_video) / (1024 * 1024)
+         stats = (
+             f"**Visual Storybook Generated!**\n\n"
+             f"- **Scenes:** {len(scene_videos)} / {total}\n"
+             f"- **Language:** {target_language} ({lang_config['native']})\n"
+             f"- **Voice:** {'Cloned' if use_clone else preset_voice_label}\n"
+             f"- **Video Provider:** {video_provider}\n"
+             f"- **File size:** {video_size:.1f} MB\n"
+         )
+
+         transcript_text = "\n\n---\n\n".join(all_transcripts) if all_transcripts else ""
+
+         return final_video, stats, transcript_text
+
+     except gr.Error:
+         raise
+     except Exception as e:
+         raise gr.Error(f"Pipeline error: {str(e)}")
+
+
+ # ==========================================
+ # GRADIO UI
+ # ==========================================
+ SAMPLE_TEXT = """Chapter 1: The Lighthouse
+
+ The old lighthouse stood at the edge of the world. Each morning, Elena climbed one hundred and forty-seven iron steps to the lamp room and watched the sun rise from the sea like a great golden coin.
+
+ "One day," she whispered to the seagulls, "I'll follow that sun to wherever it goes."
+
+ Her grandfather was a man of few words but many stories. He kept them locked away like treasures, only bringing them out on winter nights when storms howled outside.
+
+ Chapter 2: The Storm
+
+ The storm came without warning. Dark clouds swallowed the horizon and the sea rose in great heaving walls of grey-green water. Elena pressed her face to the glass of the lamp room and watched lightning split the sky.
+
+ Through the rain, she saw something impossible - a ship, its sails torn to ribbons, riding the crest of a monstrous wave. And on its deck, a figure stood perfectly still, as if the fury of the ocean meant nothing at all.
+
+ Chapter 3: The Stranger
+
+ He arrived at dawn, walking up the rocky path as if he had always known the way. His clothes were dry despite the storm, and his eyes held the same changing colors as the sea itself.
+
+ "I've been looking for this lighthouse," he said simply. "I've been looking for a very long time."
+
+ Elena's grandfather stood in the doorway, his weathered face unreadable. Then slowly, he stepped aside and let the stranger in."""
+
+ DESCRIPTION = """
+ # Visual Storybook / Animated Audiobook
+ ### English Text to Multi-Language Narrated Video
+ **Powered by Qwen3.5-Omni-Plus (narration) + HappyHorse 1.0 (video)**
+
+ Upload English text and generate a **narrated video storybook**:
+ - AI generates cinematic video scenes from your story
+ - Professional narration in 20 languages (preset or cloned voice)
+ - Each scene gets its own video + synchronized audio
+ - All scenes assembled into one final MP4
+
+ | Component | Model | What it does |
+ |-----------|-------|-------------|
+ | **Narration** | Qwen3.5-Omni-Plus | Translates + speaks the story |
+ | **Video** | HappyHorse 1.0 | Generates cinematic scene videos |
+ | **Scene Prompts** | Qwen3.5-Omni-Plus | Auto-creates video prompts from text |
+ """
+
+ lang_choices = []
+ for name, cfg in LANGUAGES.items():
+     if cfg["tier"] == "core":
+         lang_choices.append(f"* {name}")
+ for name, cfg in LANGUAGES.items():
+     if cfg["tier"] == "extended":
+         lang_choices.append(name)
+
+
+ def clean_lang(choice):
+     return choice.replace("* ", "").strip()
+
+
+ def on_voice_mode_change(mode):
+     if mode == "Clone a Voice":
+         return gr.update(visible=False), gr.update(visible=True)
+     return gr.update(visible=True), gr.update(visible=False)
+
+
+ def on_video_mode_change(mode):
+     show_images = "Image" in mode or "Both" in mode
+     return gr.update(visible=show_images)
+
+
+ def generate_wrapper(text_input, file_input, lang, voice_mode, preset_voice,
+                      clone_audio, video_provider, video_mode, scene_images,
+                      aspect, duration, progress=gr.Progress()):
+     language = clean_lang(lang)
+     imgs = None
+     if scene_images:
+         imgs = [f.name if hasattr(f, 'name') else f for f in scene_images] if isinstance(scene_images, list) else scene_images
+     return generate_storybook(
+         text_input, file_input, language, voice_mode, preset_voice,
+         clone_audio, video_provider, video_mode, imgs, aspect, duration, progress,
+     )
+
+
+ with gr.Blocks(
+     title="Visual Storybook Generator",
+     theme=gr.themes.Soft(primary_hue="amber", secondary_hue="orange", neutral_hue="stone"),
+ ) as demo:
+
+     gr.Markdown(DESCRIPTION)
+
+     with gr.Row():
+         # -- LEFT: Inputs --
+         with gr.Column(scale=1):
+             with gr.Tab("Story"):
+                 text_input = gr.Textbox(label="English Text", placeholder="Paste your story...", lines=8, max_lines=20)
+                 file_input = gr.File(
+                     label="Or Upload (.txt, .md, .pdf, .docx)",
+                     file_types=[".txt", ".md", ".pdf", ".docx"],
+                     type="filepath",
+                 )
+                 sample_btn = gr.Button("Load Sample Story", variant="secondary", size="sm")
+
+             with gr.Tab("Voice"):
+                 target_lang = gr.Dropdown(choices=lang_choices, value="* English", label="Target Language")
+                 voice_mode = gr.Radio(choices=["Preset Voice", "Clone a Voice"], value="Preset Voice", label="Voice Mode")
+                 preset_voice = gr.Dropdown(choices=PRESET_VOICES, value="Jennifer -- Cinematic narrator", label="Narrator Voice", visible=True)
+                 clone_audio = gr.Audio(label="Voice Sample (10s-3min)", type="filepath", visible=False)
+
+             with gr.Tab("Video"):
+                 video_provider = gr.Dropdown(
+                     choices=list(HAPPYHORSE_PROVIDERS.keys()),
+                     value="fal.ai",
+                     label="HappyHorse API Provider",
+                     info="Each provider needs its own API key in Secrets",
+                 )
+                 video_mode = gr.Dropdown(
+                     choices=[
+                         "Text-to-Video (auto scene prompts)",
+                         "Image-to-Video (animate uploaded images)",
+                         "Both (images where available, text for rest)",
+                     ],
+                     value="Text-to-Video (auto scene prompts)",
+                     label="Video Generation Mode",
+                 )
+                 scene_images = gr.File(
+                     label="Scene Images (one per scene, optional)",
+                     file_types=["image"],
+                     file_count="multiple",
+                     visible=False,
+                 )
+                 with gr.Row():
+                     aspect_ratio = gr.Dropdown(choices=["16:9", "9:16", "1:1"], value="16:9", label="Aspect Ratio")
+                     video_duration = gr.Slider(minimum=3, maximum=10, value=5, step=1, label="Video Duration (sec/scene)")
+
+             generate_btn = gr.Button("Generate Visual Storybook", variant="primary", size="lg")
+
+         # -- RIGHT: Output --
+         with gr.Column(scale=1):
+             video_output = gr.Video(label="Generated Storybook Video")
+             stats_output = gr.Markdown(label="Stats")
+             with gr.Accordion("Scene Transcripts", open=False):
+                 transcript_output = gr.Markdown()
+
+     # Events
+     sample_btn.click(fn=lambda: SAMPLE_TEXT, outputs=text_input)
+     voice_mode.change(fn=on_voice_mode_change, inputs=voice_mode, outputs=[preset_voice, clone_audio])
+     video_mode.change(fn=on_video_mode_change, inputs=video_mode, outputs=[scene_images])
+
+     generate_btn.click(
+         fn=generate_wrapper,
+         inputs=[text_input, file_input, target_lang, voice_mode, preset_voice,
+                 clone_audio, video_provider, video_mode, scene_images,
+                 aspect_ratio, video_duration],
+         outputs=[video_output, stats_output, transcript_output],
+     )
+
+     gr.Markdown(
+         "---\n"
+         "**Pipeline:** Text > Split into scenes > Qwen generates video prompts > "
+         "HappyHorse generates video per scene > Qwen narrates audio per scene > "
+         "FFmpeg composites video+audio > Concatenates all scenes into final MP4\n\n"
+         "**API Keys needed:** DASHSCOPE_API_KEY (audio) + one of: FAL_API_KEY, HAPPYHORSE_API_KEY, "
+         "or DASHSCOPE_API_KEY also for video (DashScope provider)\n\n"
+         "Built with Gradio | Narration by Qwen | Video by HappyHorse 1.0"
+     )
+
+ if __name__ == "__main__":
+     demo.launch()
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ openai>=1.52.0
+ gradio>=5.25.0
+ audioop-lts; python_version >= "3.13"
+ pypdf>=4.0.0
+ python-docx>=1.1.0
+ requests>=2.31.0