PeterL1n committed
Commit 337bc14
1 Parent(s): 13600b0
Files changed (1)
  1. app.py +21 -5
app.py CHANGED
@@ -17,6 +17,7 @@ bases = {
 }
 step_loaded = None
 base_loaded = "ToonYou"
+motion_loaded = None
 
 # Ensure model and scheduler are initialized in GPU-enabled function
 if not torch.cuda.is_available():
@@ -29,7 +30,7 @@ pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config, times
 
 # Function
 @spaces.GPU(enable_queue=True)
-def generate_image(prompt, base, step):
+def generate_image(prompt, base, motion, step):
     global step_loaded
     global base_loaded
     print(prompt, base, step)
@@ -44,6 +45,11 @@ def generate_image(prompt, base, step):
         pipe.unet.load_state_dict(torch.load(hf_hub_download(bases[base], "unet/diffusion_pytorch_model.bin"), map_location=device), strict=False)
         base_loaded = base
 
+    if motion_loaded != motion:
+        pipe.unload_lora_weights()
+        pipe.load_lora_weights(hf_hub_download("guoyww/animatediff", motion))
+        motion_loaded = motion
+
     output = pipe(prompt=prompt, guidance_scale=1.0, num_inference_steps=step)
     name = str(uuid.uuid4()).replace("-", "")
     path = f"/tmp/{name}.mp4"
@@ -58,9 +64,9 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Group():
         with gr.Row():
             prompt = gr.Textbox(
-                label='Prompt (English)',
-                scale=8
+                label='Prompt (English)'
             )
+        with gr.Row():
             select_base = gr.Dropdown(
                 label='Base model',
                 choices=[
@@ -70,6 +76,16 @@ with gr.Blocks(css="style.css") as demo:
                 value=base_loaded,
                 interactive=True
             )
+            select_motion = gr.Dropdown(
+                label='Motion LoRAs',
+                choices=[
+                    ("None", None),
+                    ("Zoom in", "v2_lora_ZoomIn.ckpt"),
+                    ("Zoom out", "v2_lora_ZoomOut.ckpt"),
+                ],
+                value=None,
+                interactive=True
+            )
             select_step = gr.Dropdown(
                 label='Inference steps',
                 choices=[
@@ -94,12 +110,12 @@ with gr.Blocks(css="style.css") as demo:
 
     prompt.submit(
         fn=generate_image,
-        inputs=[prompt, select_base, select_step],
+        inputs=[prompt, select_base, select_motion, select_step],
         outputs=video,
     )
     submit.click(
         fn=generate_image,
-        inputs=[prompt, select_base, select_step],
+        inputs=[prompt, select_base, select_motion, select_step],
         outputs=video,
     )
 
121