Superintelligence1130 committed
Commit b7d6967
1 Parent(s): 0ec4de5

Update app.py

Files changed (1)
  1. app.py +32 -21
app.py CHANGED
@@ -31,30 +31,41 @@ import torch
  import imageio
  from diffusers import TextToVideoZeroPipeline
  import numpy as np
+ import gradio as gr

  model_id = "runwayml/stable-diffusion-v1-5"
  pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
  seed = 0
  video_length = 8
  chunk_size = 4
- prompt = "A panda is playing guitar on times square"
-
- # Generate the video chunk-by-chunk
- result = []
- chunk_ids = np.arange(0, video_length, chunk_size - 1)
- generator = torch.Generator(device="cuda")
- for i in range(len(chunk_ids)):
-     print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
-     ch_start = chunk_ids[i]
-     ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
-     # Attach the first frame for Cross Frame Attention
-     frame_ids = [0] + list(range(ch_start, ch_end))
-     # Fix the seed for the temporal consistency
-     generator.manual_seed(seed)
-     output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids)
-     result.append(output.images[1:])
-
- # Concatenate chunks and save
- result = np.concatenate(result)
- result = [(r * 255).astype("uint8") for r in result]
- imageio.mimsave("video.mp4", result, fps=4)
+ def text_video(prompt):
+
+
+
+     # Generate the video chunk-by-chunk
+     result = []
+     chunk_ids = np.arange(0, video_length, chunk_size - 1)
+     generator = torch.Generator(device="cuda")
+     for i in range(len(chunk_ids)):
+         print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
+         ch_start = chunk_ids[i]
+         ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
+         # Attach the first frame for Cross Frame Attention
+         frame_ids = [0] + list(range(ch_start, ch_end))
+         # Fix the seed for the temporal consistency
+         generator.manual_seed(seed)
+         output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids)
+         result.append(output.images[1:])
+
+     # Concatenate chunks and save
+     result = np.concatenate(result)
+     result = [(r * 255).astype("uint8") for r in result]
+     imageio.mimsave("video.mp4", result, fps=4)
+
+ result = gr.Video(label="Generated Video")
+ gr.Interface(
+     fn=text_video,
+     inputs=gr.Textbox(label="어떤 비디오를 생성할까요? : "),
+     outputs=result
+
+ ).launch()
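
For reference, below is a minimal sketch of what the updated app.py appears to be aiming for. As committed, text_video saves video.mp4 but returns nothing, so the gr.Video output component would receive None, and the module-level name result is reused for the Gradio output component after being used for the frame list inside the function. The sketch adds a return of the saved file path and renames the output component; both are assumptions, not part of this commit. The Korean textbox label means "What video should I generate?".

import imageio
import numpy as np
import torch
import gradio as gr
from diffusers import TextToVideoZeroPipeline

model_id = "runwayml/stable-diffusion-v1-5"
pipe = TextToVideoZeroPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")
seed = 0
video_length = 8
chunk_size = 4

def text_video(prompt):
    # Generate the video chunk-by-chunk
    result = []
    chunk_ids = np.arange(0, video_length, chunk_size - 1)
    generator = torch.Generator(device="cuda")
    for i in range(len(chunk_ids)):
        print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
        ch_start = chunk_ids[i]
        ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
        # Attach the first frame for Cross Frame Attention
        frame_ids = [0] + list(range(ch_start, ch_end))
        # Fix the seed for the temporal consistency
        generator.manual_seed(seed)
        output = pipe(prompt=prompt, video_length=len(frame_ids), generator=generator, frame_ids=frame_ids)
        result.append(output.images[1:])

    # Concatenate chunks, convert to uint8 frames, and save
    result = np.concatenate(result)
    result = [(r * 255).astype("uint8") for r in result]
    imageio.mimsave("video.mp4", result, fps=4)
    return "video.mp4"  # assumption: hand the saved file path to the gr.Video output

video_output = gr.Video(label="Generated Video")  # renamed from `result` for clarity
gr.Interface(
    fn=text_video,
    inputs=gr.Textbox(label="어떤 비디오를 생성할까요? : "),  # "What video should I generate?"
    outputs=video_output,
).launch()

With the return in place, Gradio can serve video.mp4 back to the browser once generation completes.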