|
import gradio as gr |
|
import os |
|
import time |
|
from moviepy.editor import * |
|
|
|
|
|
|
|
# Remote Hugging Face Spaces used as inference backends.
# CoCa: image captioning (video frame -> text description of the scene).
caption = gr.Blocks.load(name="spaces/laion/CoCa")

# AudioLDM: text-to-audio generation (scene description -> sound effect).
audio_gen = gr.Blocks.load(name="spaces/haoheliu/audioldm-text-to-audio-generation")

# Default placeholder for the optional manual-caption textbox in the UI.
ph_message="If you're not happy with sound result, you can manually describe the scene depicted in your image :)"
|
|
|
def extract_video_frames(video_in):
    """Sample up to four representative frames from the first 5 seconds of a video.

    Frames are taken at t = 0s, 2s, 4s and the clip's end, written as
    ``frame{i}.jpg`` in the working directory.

    Args:
        video_in: filepath of the input video.

    Returns:
        list[str]: paths of the written JPEG frame files.
    """
    clip = VideoFileClip(video_in)

    total_duration = clip.duration

    # Demo limitation: only the first 5 seconds are considered.
    if total_duration > 5:
        clip = clip.subclip(0, 5)
        total_duration = clip.duration

    intervals = [0, 2, 4, total_duration]

    frames = []
    for i, interval in enumerate(intervals):
        frame_path = f'frame{i}.jpg'
        # BUGFIX: moviepy clips have no `write_image` method; the documented
        # way to dump a single frame to disk is `save_frame(filename, t=...)`.
        # This also avoids the needless get_frame()/ImageClip round-trip.
        clip.save_frame(frame_path, t=interval)
        frames.append(frame_path)

    return frames
|
|
|
|
|
def input_changes(input_vid):
    """React to the video input component changing.

    When the video is cleared, reset the manual caption, the hidden caption
    and the result video. When a video is uploaded, caption sampled frames
    with the remote CoCa space and surface the result in the placeholder.

    Args:
        input_vid: filepath of the uploaded video, or None when cleared.

    Returns:
        Updates for (manual_cap, caption_output, video_output) — must match
        the `change_out` outputs list wired to this handler.
    """
    # BUGFIX: identity comparison with None (`is None`), not `== None`.
    if input_vid is None:
        # BUGFIX: the original referenced an undefined name `sound_output`
        # (NameError at runtime); the component actually wired as the third
        # output in `change_out` is `video_output`.
        return manual_cap.update(value="", placeholder=ph_message), caption_output.update(value=None), video_output.update(value=None)
    else:
        picked_frames = extract_video_frames(input_vid)
        caps = []
        for one_frame in picked_frames:
            # CoCa endpoint args: (image, decoding strategy, temperature,
            # length penalty, min len, max len); fn_index=0 selects the
            # space's first API endpoint.
            cap = caption(one_frame, "Beam search", 1.2, 0.5, 5, 20, fn_index=0)
            caps.append(cap)
        final_cap = '\n then'.join(caps)
        print(final_cap)
        print("CoCa caption: '" + final_cap + "' β’ ")
        ph_update = "CoCa caption: '" + final_cap + "' β’ "

        return manual_cap.update(value="", placeholder=f"{ph_update}{ph_message}"), caption_output.update(value=final_cap), video_output.update(value=None)
|
|
|
def infer(video_input, manual_caption, duration_in, seed, caption_output):
    """Generate a sound effect for the input video and mux it in.

    Synthesizes audio from the caption via the remote AudioLDM space — the
    manual description if the user provided one, otherwise the CoCa caption —
    then replaces the (trimmed) input video's audio track with it.

    Args:
        video_input: filepath of the uploaded video.
        manual_caption: user-supplied scene description, "" when unset.
        duration_in: requested audio duration in seconds.
        seed: AudioLDM generation seed.
        caption_output: the auto-generated CoCa caption (hidden textbox value).

    Returns:
        tuple[str, str]: (caption actually used, path to "result.mp4").
    """
    print(duration_in)
    # Prefer the user's manual description; fall back to the CoCa caption.
    if manual_caption == "":
        cap = caption_output
    else:
        cap = manual_caption
        print("manual caption: " + cap)
        # NOTE: dropped a dead `ph_update = ""` assignment here — the name
        # was never read in this function.

    # AudioLDM endpoint args: (text, duration, guidance scale, seed,
    # candidates, model name); returns a filepath to the generated media.
    sound = audio_gen(cap, duration_in, 2.5, seed, 3, "audioldm-m-text-ft", fn_index=0)
    print(sound)

    # The AudioLDM space returns a video file; load it just to take its audio.
    video = VideoFileClip(sound)
    audio = video.audio

    # Demo limitation: only the first 5 seconds of the input are used.
    video_in = VideoFileClip(video_input)
    if video_in.duration > 5:
        video_in = video_in.subclip(0, 5)

    # Match the audio length to the (possibly trimmed) video, then mux.
    audio = audio.set_duration(video_in.duration)
    result = video_in.set_audio(audio)
    result.write_videofile("result.mp4")

    return cap, "result.mp4"
|
|
|
# Static HTML header rendered at the top of the app (title + short blurb).
# NOTE(review): some characters below look mojibake-garbled (e.g. `π€` was
# presumably a 🤗 emoji) — preserved as-is; confirm against the deployed space.
title = """
<div style="text-align: center; max-width: 700px; margin: 0 auto;">
<div
style="
display: inline-flex;
align-items: center;
gap: 0.8rem;
font-size: 1.75rem;
"
>
<h1 style="font-weight: 900; margin-bottom: 7px; margin-top: 5px;">
Video to Sound Effect
</h1>
</div>
<p style="margin-bottom: 10px; font-size: 94%">
Convert images from video to a corresponding sound effect generated through CoCa Image Captioning & AudioLDM. <br />
This demo is experimental and works only with exactly 5 seconds videos.
</p>
</div>
"""

# Static HTML footer: author credit plus badge links to related spaces.
article = """
<div class="footer">
<p>
Follow <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a> for future updates π€
</p>
</div>
<div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;margin-bottom: 30px;">
<p>You may also like: </p>
<div id="may-like-content" style="display:flex;flex-wrap: wrap;align-items:center;height:20px;">
<svg height="20" width="208" style="margin-left:4px;margin-bottom: 6px;">
<a href="https://huggingface.co/spaces/haoheliu/audioldm-text-to-audio-generation" target="_blank">
<image href="https://img.shields.io/badge/π€ Spaces-AudioLDM_Text_to_Audio-blue" src="https://img.shields.io/badge/π€ Spaces-AudioLDM_Text_to_Audio-blue.png" height="20"/>
</a>
</svg>
<svg height="20" width="122" style="margin-left:4px;margin-bottom: 6px;">
<a href="https://huggingface.co/spaces/fffiloni/spectrogram-to-music" target="_blank">
<image href="https://img.shields.io/badge/π€ Spaces-Riffusion-blue" src="https://img.shields.io/badge/π€ Spaces-Riffusion-blue.png" height="20"/>
</a>
</svg>
</div>
</div>
"""
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI (gradio 3.x API: `source=` on Video, `component.update(...)`).
# ---------------------------------------------------------------------------
with gr.Blocks(css="style.css") as demo:
    with gr.Column(elem_id="col-container"):

        gr.HTML(title)

        # Source video; only the first 5 seconds are used downstream.
        input_vid = gr.Video(source="upload", type="filepath", elem_id="input-vid")

        with gr.Column():
            # Optional manual scene description; overrides the CoCa caption.
            manual_cap = gr.Textbox(label="Manual Video description (optional)", lines=3, placeholder=ph_message)
            with gr.Row():
                # Duration slider is display-only (interactive=False), pinned to 5s.
                duration_in = gr.Slider(interactive=False, minimum=5, maximum=10, step=5, value=5, label="Duration")
                seed_in = gr.Slider(label="Seed", value=440, minimum=45, maximum=10000, step=1)

        # Hidden textbox carrying the auto-generated CoCa caption between handlers.
        caption_output = gr.Textbox(label="Caption", visible=False, elem_id="text-caption")
        video_output = gr.Video(label="Result", elem_id="video-output")

        generate = gr.Button("Generate SFX from Video")

        gr.HTML(article)

    # On upload/clear: refresh the placeholder, caption, and result video.
    change_out = [manual_cap, caption_output, video_output]
    input_vid.change(input_changes, input_vid, change_out, queue=False)

    # Main action: caption -> AudioLDM -> mux; also exposed via the API as "v2fx".
    generate.click(infer, inputs=[input_vid, manual_cap, duration_in, seed_in, caption_output], outputs=[caption_output, video_output], api_name="v2fx")

demo.queue(max_size=32).launch(debug=True)
|
|