lev1 committed on
Commit
b1373ae
1 Parent(s): cf67ed4

Demo v2: pose, edge, edge+DB and text conditional generation

Browse files
Files changed (4) hide show
  1. app.py +2 -7
  2. app_canny.py +49 -1
  3. app_canny_db.py +82 -1
  4. app_pose.py +57 -1
app.py CHANGED
@@ -39,19 +39,14 @@ with gr.Blocks(css='style.css') as demo:
39
  """)
40
 
41
  with gr.Tab('Zero-Shot Text2Video'):
42
- # pass
43
  create_demo_text_to_video(model)
44
  with gr.Tab('Video Instruct Pix2Pix'):
45
- # pass
46
  create_demo_pix2pix_video(model)
47
  with gr.Tab('Pose Conditional'):
48
- # pass
49
  create_demo_pose(model)
50
  with gr.Tab('Edge Conditional'):
51
- # pass
52
  create_demo_canny(model)
53
  with gr.Tab('Edge Conditional and Dreambooth Specialized'):
54
- # pass
55
  create_demo_canny_db(model)
56
 
57
  gr.HTML(
@@ -75,5 +70,5 @@ with gr.Blocks(css='style.css') as demo:
75
  </div>
76
  """)
77
 
78
- demo.launch(debug=True)
79
- # demo.queue(api_open=False).launch(file_directories=['temporal'], share=True)
 
39
  """)
40
 
41
  with gr.Tab('Zero-Shot Text2Video'):
 
42
  create_demo_text_to_video(model)
43
  with gr.Tab('Video Instruct Pix2Pix'):
 
44
  create_demo_pix2pix_video(model)
45
  with gr.Tab('Pose Conditional'):
 
46
  create_demo_pose(model)
47
  with gr.Tab('Edge Conditional'):
 
48
  create_demo_canny(model)
49
  with gr.Tab('Edge Conditional and Dreambooth Specialized'):
 
50
  create_demo_canny_db(model)
51
 
52
  gr.HTML(
 
70
  </div>
71
  """)
72
 
73
+ # demo.launch(debug=True)
74
+ demo.queue(api_open=False).launch(file_directories=['temporal'], share=True)
app_canny.py CHANGED
@@ -3,7 +3,55 @@ from model import Model
3
 
4
  def create_demo(model: Model):
5
 
 
 
 
 
 
 
 
 
 
 
6
  with gr.Blocks() as demo:
7
  with gr.Row():
8
- gr.Markdown('## Text and Canny-Edge Conditional Video Generation (coming soon)')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  return demo
 
3
 
4
def create_demo(model: Model):
    """Build the Gradio tab for text + Canny-edge conditional video generation.

    Args:
        model: project Model exposing ``process_controlnet_canny(video, prompt)``.

    Returns:
        The assembled (not yet launched) ``gr.Blocks`` demo.
    """
    # (input video, prompt) pairs surfaced through the gr.Examples widget below.
    examples = [
        ["__assets__/canny_videos_edge/butterfly.mp4", "white butterfly, a high-quality, detailed, and professional photo"],
        ["__assets__/canny_videos_edge/deer.mp4", "oil painting of a deer, a high-quality, detailed, and professional photo"],
        ["__assets__/canny_videos_edge/fox.mp4", "wild red fox is walking on the grass, a high-quality, detailed, and professional photo"],
        ["__assets__/canny_videos_edge/girl_dancing.mp4", "oil painting of a girl dancing close-up, masterpiece, a high-quality, detailed, and professional photo"],
        ["__assets__/canny_videos_edge/girl_turning.mp4", "oil painting of a beautiful girl, a high-quality, detailed, and professional photo"],
        ["__assets__/canny_videos_edge/halloween.mp4", "beautiful girl halloween style, a high-quality, detailed, and professional photo"],
        ["__assets__/canny_videos_edge/santa.mp4", "a santa claus, a high-quality, detailed, and professional photo"],
    ]

    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Text and Canny-Edge Conditional Video Generation')
        with gr.Row():
            # FIX: the opening <h2> tag was closed with </h3>; tags now match.
            gr.HTML(
                """
                <div style="text-align: left; auto;">
                <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
                Description: TBD
                </h2>
                </div>
                """)

        with gr.Row():
            gr.Markdown('### You can either use one of the below shown examples, or upload your own video from which edge-motions will be extracted. But Take into account that for now If your uploaded video has more than 8 frames, then we will uniformly select them and our method will run only on them.')

        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Input Video", source='upload', format="mp4", visible=True).style(height="auto")
            with gr.Column():
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
            with gr.Column():
                result = gr.Video(label="Generated Video").style(height="auto")

        # Components fed, in order, to process_controlnet_canny.
        inputs = [
            input_video,
            prompt,
        ]

        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    fn=model.process_controlnet_canny,
                    # cache_examples=os.getenv('SYSTEM') == 'spaces',
                    run_on_click=False,
                    )

        run_button.click(fn=model.process_controlnet_canny,
                         inputs=inputs,
                         outputs=result)
    return demo
app_canny_db.py CHANGED
@@ -2,10 +2,91 @@ import gradio as gr
2
  from model import Model
3
  import gradio_utils
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  def create_demo(model: Model):
7
 
8
  with gr.Blocks() as demo:
9
  with gr.Row():
10
- gr.Markdown('## Text, Canny-Edge and DreamBooth Conditional Video Generation (coming soon)')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  return demo
 
 
 
 
 
 
 
 
 
 
2
  from model import Model
3
  import gradio_utils
4
 
5
# (DreamBooth model name, motion id, prompt) rows surfaced through gr.Examples
# in create_demo below.
# NOTE(review): names presumably match gradio_utils' id mappings — confirm.
examples = [
    ['Anime DB', "woman1", "Portrait of detailed 1girl, feminine, soldier cinematic shot on canon 5d ultra realistic skin intricate clothes accurate hands Rory Lewis Artgerm WLOP Jeremy Lipking Jane Ansell studio lighting"],
    ['Arcane DB', "woman1", "Oil painting of a beautiful girl arcane style, masterpiece, a high-quality, detailed, and professional photo"],
    ['GTA-5 DB', "man1", "gtav style"],
    ['GTA-5 DB', "woman3", "gtav style"],
    ['Avatar DB', "woman2", "oil painting of a beautiful girl avatar style"],
]
12
+
13
+
14
def load_db_model(evt: gr.SelectData):
    """Resolve a DB-gallery click (by index) to its DreamBooth model name."""
    return gradio_utils.get_db_name_from_id(evt.index)
17
+
18
+
19
def canny_select(evt: gr.SelectData):
    """Resolve a motion-gallery click (by index) to its canny-motion name."""
    return gradio_utils.get_canny_name_from_id(evt.index)
22
+
23
 
24
def create_demo(model: Model):
    """Build the Gradio tab for text + Canny-edge + DreamBooth conditional generation.

    Args:
        model: project Model exposing
            ``process_controlnet_canny_db(db_model, motion, prompt)``.

    Returns:
        The assembled (not yet launched) ``gr.Blocks`` demo.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Text, Canny-Edge and DreamBooth Conditional Video Generation')
        with gr.Row():
            # FIX: the opening <h2> tag was closed with </h3>; tags now match.
            gr.HTML(
                """
                <div style="text-align: left; auto;">
                <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
                Description: TBD
                </h2>
                </div>
                """)

        with gr.Row():
            gr.Markdown('### You must choose one DB model and one "motion edges" shown below, or use the examples')

        with gr.Row():
            with gr.Column():
                gr.Markdown("## Selection")
                db_text_field = gr.Markdown('DB Model: **Anime DB** ')
                canny_text_field = gr.Markdown('Motion: **woman1**')
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
            with gr.Column():
                # NOTE(review): gr.Image labeled "Generated Video" — presumably
                # displays a GIF result; confirm against model output.
                result = gr.Image(label="Generated Video").style(height=400)

        with gr.Row():
            gallery_db = gr.Gallery(label="Db models", value=[('__assets__/db_files/anime.jpg', "anime"), ('__assets__/db_files/arcane.jpg', "Arcane"), ('__assets__/db_files/gta.jpg', "GTA-5 (Man)"), ('__assets__/db_files/avatar.jpg', "Avatar DB")]).style(grid=[4], height=50)
        with gr.Row():
            gallery_canny = gr.Gallery(label="Motions", value=[('__assets__/db_files/woman1.gif', "woman1"), ('__assets__/db_files/woman2.gif', "woman2"), ('__assets__/db_files/man1.gif', "man1"), ('__assets__/db_files/woman3.gif', "woman3")]).style(grid=[4], height=50)
            # NOTE(review): created but never wired to any callback or input —
            # looks dead; confirm before removing.
            predefined_motion = gr.Textbox(visible=False, label='One of the above defined motions')

        # Hidden textboxes carrying the current gallery selections into the model call.
        db_selection = gr.Textbox(label="DB Model", visible=False)
        canny_selection = gr.Textbox(label="One of the above defined motions", visible=False)

        gallery_db.select(load_db_model, None, db_selection)
        gallery_canny.select(canny_select, None, canny_selection)

        # Keep the visible markdown in sync; the callbacks are defined at
        # module level further down (names resolve at runtime, so order is fine).
        db_selection.change(on_db_selection_update, None, db_text_field)
        canny_selection.change(on_canny_selection_update, None, canny_text_field)

        # Components fed, in order, to process_controlnet_canny_db.
        inputs = [
            db_selection,
            canny_selection,
            prompt,
        ]

        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    fn=model.process_controlnet_canny_db,
                    # cache_examples=os.getenv('SYSTEM') == 'spaces',
                    )

        run_button.click(fn=model.process_controlnet_canny_db,
                         inputs=inputs,
                         outputs=result)
    return demo
84
+
85
+
86
def on_db_selection_update(evt: gr.EventData):
    """Render the newly selected DreamBooth model name as markdown."""
    return f"DB model: **{evt._data}**"
89
+
90
+
91
def on_canny_selection_update(evt: gr.EventData):
    """Render the newly selected motion name as markdown."""
    motion_name = evt._data
    return f"Motion: **{motion_name}**"
app_pose.py CHANGED
@@ -3,9 +3,65 @@ import os
3
 
4
  from model import Model
5
 
 
 
 
 
 
 
 
 
6
  def create_demo(model: Model):
7
  with gr.Blocks() as demo:
8
  with gr.Row():
9
- gr.Markdown('## Text and Pose Conditional Video Generation (coming soon)')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
 
11
  return demo
 
 
 
 
 
 
 
 
3
 
4
  from model import Model
5
 
6
# (pose sequence id, prompt) rows surfaced through gr.Examples in create_demo below.
# NOTE(review): ids presumably match the gallery captions "Motion 1".."Motion 5" — confirm.
examples = [
    ['Motion 1', "A Robot is dancing in Sahara desert"],
    ['Motion 2', "A Robot is dancing in Sahara desert"],
    ['Motion 3', "A Robot is dancing in Sahara desert"],
    ['Motion 4', "A Robot is dancing in Sahara desert"],
    ['Motion 5', "A Robot is dancing in Sahara desert"],
]
13
+
14
def create_demo(model: Model):
    """Build the Gradio tab for text + pose conditional video generation.

    Args:
        model: project Model exposing ``process_controlnet_pose(motion, prompt)``.

    Returns:
        The assembled (not yet launched) ``gr.Blocks`` demo.
    """
    with gr.Blocks() as demo:
        with gr.Row():
            gr.Markdown('## Text and Pose Conditional Video Generation')
        with gr.Row():
            # FIX: the opening <h2> tag was closed with </h3>; tags now match.
            gr.HTML(
                """
                <div style="text-align: left; auto;">
                <h2 style="font-weight: 450; font-size: 1rem; margin: 0rem">
                Description: TBD
                </h2>
                </div>
                """)

        with gr.Row():
            gr.Markdown('### You must select one pose sequence shown below, or use the examples')
            with gr.Column():
                gallery_pose_sequence = gr.Gallery(label="Pose Sequence", value=[('__assets__/poses_skeleton_gifs/dance1.gif', "Motion 1"), ('__assets__/poses_skeleton_gifs/dance2.gif', "Motion 2"), ('__assets__/poses_skeleton_gifs/dance3.gif', "Motion 3"), ('__assets__/poses_skeleton_gifs/dance4.gif', "Motion 4"), ('__assets__/poses_skeleton_gifs/dance5.gif', "Motion 5")]).style(grid=[2], height="auto")
                # Hidden textbox holding the currently selected motion id; written
                # by the gallery callback and read as the model's first input.
                input_video_path = gr.Textbox(label="Pose Sequence", visible=False, value="Motion 1")
                gr.Markdown("## Selection")
                pose_sequence_selector = gr.Markdown('Pose Sequence: **Motion 1**')
            with gr.Column():
                prompt = gr.Textbox(label='Prompt')
                run_button = gr.Button(label='Run')
            with gr.Column():
                # NOTE(review): gr.Image labeled "Generated Video" — presumably
                # displays a GIF result; confirm against model output.
                result = gr.Image(label="Generated Video")

        # Keep the visible markdown in sync with the hidden selection; both
        # callbacks are defined at module level below (resolved at runtime).
        input_video_path.change(on_video_path_update, None, pose_sequence_selector)
        gallery_pose_sequence.select(pose_gallery_callback, None, input_video_path)

        # Components fed, in order, to process_controlnet_pose.
        inputs = [
            input_video_path,
            prompt,
        ]

        gr.Examples(examples=examples,
                    inputs=inputs,
                    outputs=result,
                    # cache_examples=os.getenv('SYSTEM') == 'spaces',
                    fn=model.process_controlnet_pose,
                    run_on_click=False,
                    )

        run_button.click(fn=model.process_controlnet_pose,
                         inputs=inputs,
                         outputs=result)

    return demo
61
+
62
+
63
def on_video_path_update(evt: gr.EventData):
    """Render the newly selected pose-sequence name as markdown."""
    sequence_name = evt._data
    return f'Pose Sequence: **{sequence_name}**'
65
+
66
def pose_gallery_callback(evt: gr.SelectData):
    """Translate a 0-based gallery selection into its 1-based "Motion N" id."""
    motion_number = evt.index + 1
    return f"Motion {motion_number}"