fffiloni committed
Commit 91b1f35
Parent: 1eab382

added custom model option

Files changed (1)
  1. app.py +45 -27
app.py CHANGED
@@ -1,5 +1,6 @@
 import gradio as gr
 import os
+import shutil
 import subprocess
 from share_btn import community_icon_html, loading_icon_html, share_js
 import cv2
@@ -22,7 +23,17 @@ for model_id in model_ids:
     model_name = model_id.split('/')[-1]
     snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')
 
+def load_model(model_id):
+    local_dir = f'checkpoints/stable-diffusion-v1-5'
+    # Check if the directory exists
+    if os.path.exists(local_dir):
+        # Delete the directory if it exists
+        shutil.rmtree(local_dir)
 
+    model_name = model_id.split('/')[-1]
+    snapshot_download(model_id, local_dir=f'checkpoints/{model_name}')
+    os.rename(f'checkpoints/{model_name}', f'checkpoints/stable-diffusion-v1-5')
+    return "model loaded"
 
 def get_frame_count(filepath):
     if filepath is not None:
@@ -259,38 +270,45 @@ with gr.Blocks(css=css) as demo:
 
         [![Duplicate this Space](https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-sm.svg#center)](https://huggingface.co/spaces/fffiloni/ControlVideo?duplicate=true)
     """)
-    with gr.Row():
-        with gr.Column():
-            #video_in = gr.Video(source="upload", type="filepath", visible=True)
+
+    with gr.Column():
+        with gr.Row():
             video_path = gr.Video(label="Input video", source="upload", type="filepath", visible=True, elem_id="video-in")
-            prompt = gr.Textbox(label="prompt", elem_id="prompt-in")
             with gr.Column():
-                video_length = gr.Slider(label="Video length", info="How many frames do you want to process ? For demo purpose, max is set to 24", minimum=1, maximum=12, step=1, value=2)
-                with gr.Row():
-                    condition = gr.Dropdown(label="Condition", choices=["depth", "canny", "pose"], value="depth")
-                    seed = gr.Number(label="seed", value=42)
-                submit_btn = gr.Button("Submit")
+                video_res = gr.Video(label="result", elem_id="video-out")
+                with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
+                    community_icon = gr.HTML(community_icon_html)
+                    loading_icon = gr.HTML(loading_icon_html)
+                    share_button = gr.Button("Share to community", elem_id="share-btn")
+        with gr.Row():
+            chosen_model = gr.Textbox(label="Custom model (*1.5)", placeholder="E.g: nitrosocke/Ghibli-Diffusion")
+            model_status = gr.Textbox(label="status")
+            load_model_btn = gr.Button("load model (optional)")
+        prompt = gr.Textbox(label="prompt", info="If you loaded a custom model, do not forget to include Prompt trigger", elem_id="prompt-in")
         with gr.Column():
-            video_res = gr.Video(label="result", elem_id="video-out")
-            with gr.Group(elem_id="share-btn-container", visible=False) as share_group:
-                community_icon = gr.HTML(community_icon_html)
-                loading_icon = gr.HTML(loading_icon_html)
-                share_button = gr.Button("Share to community", elem_id="share-btn")
-        gr.Examples(
-            examples=[["Indiana Jones moonwalk in the wild jungle", "./examples/moonwalk.mp4", 'depth', 24, 192837465]],
-            fn=run_inference,
-            inputs=[prompt,
-                    video_path,
-                    condition,
-                    video_length,
-                    seed
-            ],
-            outputs=[video_res, share_group],
-            cache_examples=True
-        )
+            video_length = gr.Slider(label="Video length", info="How many frames do you want to process ? For demo purpose, max is set to 24", minimum=1, maximum=12, step=1, value=2)
+            with gr.Row():
+                condition = gr.Dropdown(label="Condition", choices=["depth", "canny", "pose"], value="depth")
+                seed = gr.Number(label="seed", value=42)
+            submit_btn = gr.Button("Submit")
+
+
+
+    gr.Examples(
+        examples=[["Indiana Jones moonwalk in the wild jungle", "./examples/moonwalk.mp4", 'depth', 24, 192837465]],
+        fn=run_inference,
+        inputs=[prompt,
+                video_path,
+                condition,
+                video_length,
+                seed
+        ],
+        outputs=[video_res, share_group],
+        cache_examples=False
+    )
 
     share_button.click(None, [], [], _js=share_js)
-
+    load_model_btn.click(fn=load_model, inputs=[chosen_model], outputs=[model_status], queue=False)
    video_path.change(fn=get_frame_count,
                      inputs=[video_path],
                      outputs=[video_length],
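
For context, the checkpoint swap that the new load_model helper performs can be reproduced standalone. The following is a minimal sketch, not part of the commit: it assumes huggingface_hub is installed and that it runs from the Space's root directory (where the checkpoints/ folder lives), and swap_checkpoint is a hypothetical name used only for illustration.

import os
import shutil

from huggingface_hub import snapshot_download  # same downloader the Space already uses

def swap_checkpoint(model_id: str, target_dir: str = "checkpoints/stable-diffusion-v1-5") -> str:
    # Clear whatever checkpoint currently occupies the default slot.
    if os.path.exists(target_dir):
        shutil.rmtree(target_dir)
    # Download the requested repo into checkpoints/<repo name>.
    model_name = model_id.split("/")[-1]
    snapshot_download(model_id, local_dir=f"checkpoints/{model_name}")
    # Rename it so downstream code keeps loading 'stable-diffusion-v1-5'.
    os.rename(f"checkpoints/{model_name}", target_dir)
    return "model loaded"

# Example: swap in a Stable Diffusion 1.5-based community model before inference.
# swap_checkpoint("nitrosocke/Ghibli-Diffusion")

In the Space's UI this logic is wired to the "load model (optional)" button via load_model_btn.click, so a custom checkpoint must be loaded before pressing Submit, and the prompt should include that model's trigger words, as the prompt field's info text notes.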