juancopi81 committed on
Commit
02fcd9e
1 Parent(s): 595153d

Change examples to be at the bottom

Files changed (3)
  1. app.py +12 -16
  2. textprocessor.py +1 -1
  3. videocreator.py +1 -1
app.py CHANGED
@@ -69,9 +69,13 @@ def datapipeline(url: str,
     # Select transcriber
     if video_language == "Spanish":
         audio_transcriber = es_audio_transcriber
-        video_creator = VideoCreator(es_tts, image_pipeline)
     elif video_language == "English":
         audio_transcriber = en_audio_transcriber
+    else:
+        return "Language not supported"
+    if summary_language == "Spanish":
+        video_creator = VideoCreator(es_tts, image_pipeline)
+    elif summary_language == "English":
         video_creator = VideoCreator(en_tts, image_pipeline)
     else:
         return "Language not supported"
@@ -236,18 +240,6 @@ with block as demo:
                 <p style="margin-bottom: 10px; font-size: 94%">
                   Running on <b>{device_print}</b>
                 </p>
-                <p>
-                  Some samples videos you can try:
-                  <ul>
-                    <li>https://www.youtube.com/watch?v=Hk5evm1NgzA (Little Red Riding Hood. Infer time: c.a. 196 seconds)</li>
-                    <li>https://www.youtube.com/watch?v=nJxWS9jZ9-c (Elon Musk's Biography. Infer time: c.a. 176 seconds)</li>
-                    <li>https://www.youtube.com/watch?v=sRmmQBBln9Q (Cook recipe. Infer time: c.a. 200 seconds)</li>
-                    <li>https://www.youtube.com/watch?v=qz4Wc48KITA (Poem by Edgar Allan Poe. Infer time: c.a. 200 seconds)</li>
-                    <li>https://www.youtube.com/watch?v=2D8CaoIY7Lk (The history of Christmas trees. Infer time: c.a. 130 seconds)</li>
-                    <li>https://www.youtube.com/watch?v=uhmRR-Ir7Bk (Dec. 20 news. Infer time: c.a. 230 seconds)</li>
-                    <li>https://www.youtube.com/watch?v=CT9T7Dp63x4 (Presentation of movie Lady Chatterley's Lover. Infer time: c.a. 277 seconds)</li>
-                  </ul>
-                </p>
             </div>
         """
     )
@@ -296,9 +288,13 @@ with block as demo:
                            video_styles],
                   outputs=[video_output, file_output])
 
-    #gr.Examples(
-    #    examples=[[], []]
-    #)
+    gr.Examples(
+        examples=[["https://www.youtube.com/watch?v=c0i5016pB2Y", "English", "Spanish", "oil on painting"],
+                  ["https://www.youtube.com/watch?v=Hk5evm1NgzA", "Spanish", "English", "dramatic lighting, cinematic, matte painting"],
+                  ["https://www.youtube.com/watch?v=sRmmQBBln9Q", "Spanish", "Spanish", "Hyper real, 4k"],
+                  ["https://www.youtube.com/watch?v=qz4Wc48KITA", "Spanish", "English", "detailed art by kay nielsen and walter crane, illustration style, watercolor"]],
+        inputs=[url, video_language, summary_language, video_styles]
+    )
     gr.HTML(
         """
         <div class="footer">
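
For reference, the relocated gr.Examples block pre-fills the demo's inputs when a sample row is clicked, which is why it now sits after the run button rather than in the intro HTML. A minimal, runnable sketch of the same pattern (the components and process_fn below are simplified stand-ins, not the actual app.py code):

import gradio as gr

# Hypothetical stand-in for app.py's datapipeline function.
def process_fn(url, video_language, summary_language, video_styles):
    return f"Would summarize {url} ({video_language} -> {summary_language}), style: {video_styles}"

with gr.Blocks() as demo:
    url = gr.Textbox(label="YouTube URL")
    video_language = gr.Dropdown(["Spanish", "English"], label="Video language")
    summary_language = gr.Dropdown(["Spanish", "English"], label="Summary language")
    video_styles = gr.Textbox(label="Video styles")
    result = gr.Textbox(label="Result")
    gr.Button("Create video").click(
        process_fn,
        inputs=[url, video_language, summary_language, video_styles],
        outputs=result,
    )
    # Declared last so the examples render at the bottom of the page;
    # clicking a row populates the four input components above.
    gr.Examples(
        examples=[["https://www.youtube.com/watch?v=c0i5016pB2Y", "English", "Spanish", "oil on painting"]],
        inputs=[url, video_language, summary_language, video_styles],
    )

demo.launch()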
textprocessor.py CHANGED
@@ -37,7 +37,7 @@ class TextProcessor:
                          prompt: str,
                          summary_language: str) -> Dict:
         gpt_prompt = context_prompt.replace("$TRANSCRIPTION", prompt)
-        gpt_prompt = gpt_prompt.replace("$SUMMARY_LANGUAGE ", summary_language)
+        gpt_prompt = gpt_prompt.replace("$SUMMARY_LANGUAGE", summary_language)
         print("gpt_prompt", gpt_prompt)
         response = openai.Completion.create(
             model=self.model,
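
The one-character fix above matters because str.replace needs an exact match: with the stray trailing space, a template where the placeholder is not followed by a space keeps "$SUMMARY_LANGUAGE" untouched. A quick illustration (the template string here is a made-up stand-in for the repo's context_prompt):

# Made-up template; the real context_prompt lives in the repo.
context_prompt = "Summarize the following transcription in $SUMMARY_LANGUAGE.\n$TRANSCRIPTION"
transcription = "Once upon a time..."

# Old code: "$SUMMARY_LANGUAGE " (trailing space) finds no match here,
# so the literal placeholder is sent to the model.
broken = context_prompt.replace("$TRANSCRIPTION", transcription)
broken = broken.replace("$SUMMARY_LANGUAGE ", "English")
assert "$SUMMARY_LANGUAGE" in broken

# New code: the placeholder is substituted as intended.
fixed = context_prompt.replace("$TRANSCRIPTION", transcription)
fixed = fixed.replace("$SUMMARY_LANGUAGE", "English")
assert "$SUMMARY_LANGUAGE" not in fixed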
videocreator.py CHANGED
@@ -35,7 +35,7 @@ class VideoCreator:
         return "output.wav"
 
     def _get_bg_image_from_description(self, img_desc: str, video_styles: str):
-        images = self.image_pipeline(img_desc + video_styles)
+        images = self.image_pipeline(img_desc + ", " + video_styles)
         print("Image generated!")
         image_output = images.images[0]
         image_output.save("img.png")
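
The videocreator.py tweak just inserts a comma separator so the style keywords no longer run into the last word of the generated image description. Illustratively (values made up):

img_desc = "a cottage in a snowy forest"
video_styles = "oil on painting"

img_desc + video_styles          # "a cottage in a snowy forestoil on painting"
img_desc + ", " + video_styles   # "a cottage in a snowy forest, oil on painting"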