Files changed (1)
  1. app (7).py +53 -0
app (7).py ADDED
@@ -0,0 +1,53 @@
+ import spaces
+ import torch
+ from diffusers import CogVideoXImageToVideoPipeline
+ from diffusers.utils import export_to_video, load_image
+ import gradio as gr
+
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
+     "THUDM/CogVideoX-5b-I2V",
+     torch_dtype=torch.bfloat16
+ )
+ pipe.to("cuda")
+ pipe.vae.enable_tiling()
+ pipe.vae.enable_slicing()
+
+ @spaces.GPU(duration=250)
+ def generate_video(prompt, image):
+     video = pipe(
+         prompt=prompt,
+         image=image,
+         num_videos_per_prompt=1,
+         num_inference_steps=50,
+         num_frames=49,
+         guidance_scale=6,
+         generator=torch.Generator(device="cuda").manual_seed(42),
+     ).frames[0]
+
+     video_path = "output.mp4"
+     export_to_video(video, video_path, fps=8)
+
+     return video_path
+
+ # Gradio interface
+ with gr.Blocks() as demo:
+     gr.Markdown("# Image to Video Generation")
+
+     with gr.Row():
+         # Text input for the prompt
+         prompt_input = gr.Textbox(label="Prompt", value="A little girl is riding a bicycle at high speed. Focused, detailed, realistic.")
+
+         # Image upload
+         image_input = gr.Image(label="Upload an Image", type="pil")
+
+     # Button to generate the video
+     generate_button = gr.Button("Generate Video")
+
+     # Output for the generated video
+     video_output = gr.Video(label="Generated Video")
+
+     # Action on button click
+     generate_button.click(fn=generate_video, inputs=[prompt_input, image_input], outputs=video_output)
+
+ # Launch the interface
+ demo.launch()
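
The Space keeps the full 5B-parameter pipeline on the GPU for the duration of each @spaces.GPU call. On hardware with less VRAM, diffusers' model CPU offload can stand in for a plain pipe.to("cuda"); the sketch below is a standalone variant of the same generation step without the Gradio UI, and the input image path and output filename are placeholders, not files from this Space.

import torch
from diffusers import CogVideoXImageToVideoPipeline
from diffusers.utils import export_to_video, load_image

pipe = CogVideoXImageToVideoPipeline.from_pretrained(
    "THUDM/CogVideoX-5b-I2V",
    torch_dtype=torch.bfloat16,
)

# Move each submodule to the GPU only while it runs instead of keeping the
# whole pipeline resident in VRAM; tiling/slicing keep VAE decode memory low.
pipe.enable_model_cpu_offload()
pipe.vae.enable_tiling()
pipe.vae.enable_slicing()

image = load_image("input.jpg")  # placeholder input image
frames = pipe(
    prompt="A little girl is riding a bicycle at high speed. Focused, detailed, realistic.",
    image=image,
    num_inference_steps=50,
    num_frames=49,
    guidance_scale=6,
    generator=torch.Generator(device="cuda").manual_seed(42),
).frames[0]
export_to_video(frames, "output.mp4", fps=8)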
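
A Space like this also needs a requirements.txt alongside app.py. The list below is only a guess at the minimal set (gradio and spaces are normally provided by the Space runtime itself, and older diffusers releases use opencv-python rather than imageio for export_to_video); packages and versions are assumptions, not taken from this repo.

torch
diffusers
transformers
accelerate
sentencepiece
imageio-ffmpeg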
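
Once the Space is running, the endpoint wired to generate_button.click can also be called from Python with gradio_client. The Space id below is a placeholder, and the api_name (by default derived from the function name) should be checked against the Space's "Use via API" page.

from gradio_client import Client, handle_file

client = Client("user/cogvideox-i2v-demo")  # placeholder Space id
result = client.predict(
    "A little girl is riding a bicycle at high speed. Focused, detailed, realistic.",
    handle_file("input.jpg"),        # placeholder local image
    api_name="/generate_video",      # verify on the Space's API page
)
print(result)  # local path (or payload dict, depending on gradio version) for the downloaded video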