fffiloni commited on
Commit
4438f8a
•
1 Parent(s): 8852968

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +19 -8
app.py CHANGED
@@ -38,7 +38,7 @@ from model import Model
38
  model = Model()
39
 
40
 
41
- def controlnet(i, prompt, control_task, seed_in):
42
  img= Image.open(i)
43
  np_img = np.array(img)
44
 
@@ -47,12 +47,13 @@ def controlnet(i, prompt, control_task, seed_in):
47
  num_samples = 1
48
  image_resolution = 512
49
  detect_resolution = 512
50
- ddim_steps = 20
51
- scale = 9.0
52
  eta = 0.0
 
 
 
53
  if control_task == 'Canny':
54
  result = model.process_canny(np_img, prompt, a_prompt, n_prompt, num_samples,
55
- image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta)
56
  elif control_task == 'Depth':
57
  result = model.process_depth(np_img, prompt, a_prompt, n_prompt, num_samples,
58
  image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta)
@@ -112,7 +113,7 @@ def create_video(frames, fps):
112
  return 'movie.mp4'
113
 
114
 
115
- def infer(prompt,video_in, control_task, seed_in, trim_value):
116
  print(f"""
117
  ———————————————
118
  {prompt}
@@ -133,7 +134,7 @@ def infer(prompt,video_in, control_task, seed_in, trim_value):
133
  print("set stop frames to: " + str(n_frame))
134
 
135
  for i in frames_list[0:int(n_frame)]:
136
- controlnet_img = controlnet(i, prompt,control_task, seed_in)
137
  #images = controlnet_img[0]
138
  #rgb_im = images[0].convert("RGB")
139
 
@@ -204,11 +205,21 @@ with gr.Blocks(css='style.css') as demo:
204
  share_button = gr.Button("Share to community", elem_id="share-btn")
205
  with gr.Column():
206
  #status = gr.Textbox()
207
- prompt = gr.Textbox(label="Prompt", placeholder="enter prompt", show_label=False, elem_id="prompt-in")
208
  control_task = gr.Dropdown(label="Control Task", choices=["Canny", "Depth", "Pose"], value="Pose", multiselect=False)
209
  with gr.Row():
210
  seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456)
211
  trim_in = gr.Slider(label="Cut video at (s)", minimum=1, maximum=5, step=1, value=1)
 
 
 
 
 
 
 
 
 
 
212
  gr.HTML("""
213
  <a style="display:inline-block" href="https://huggingface.co/spaces/fffiloni/Pix2Pix-Video?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
214
  work with longer videos / skip the queue:
@@ -217,7 +228,7 @@ with gr.Blocks(css='style.css') as demo:
217
 
218
 
219
 
220
- inputs = [prompt,video_inp,control_task, seed_inp, trim_in]
221
  outputs = [video_out, share_group]
222
  #outputs = [status]
223
 
 
38
  model = Model()
39
 
40
 
41
+ def controlnet(i, prompt, control_task, seed_in, ddim_steps, scale):
42
  img= Image.open(i)
43
  np_img = np.array(img)
44
 
 
47
  num_samples = 1
48
  image_resolution = 512
49
  detect_resolution = 512
 
 
50
  eta = 0.0
51
+ low_threshold = 100
52
+ high_threshold = 200
53
+
54
  if control_task == 'Canny':
55
  result = model.process_canny(np_img, prompt, a_prompt, n_prompt, num_samples,
56
+ image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta, low_threshold, high_threshold)
57
  elif control_task == 'Depth':
58
  result = model.process_depth(np_img, prompt, a_prompt, n_prompt, num_samples,
59
  image_resolution, detect_resolution, ddim_steps, scale, seed_in, eta)
 
113
  return 'movie.mp4'
114
 
115
 
116
+ def infer(prompt,video_in, control_task, seed_in, trim_value, ddim_steps, scale):
117
  print(f"""
118
  ———————————————
119
  {prompt}
 
134
  print("set stop frames to: " + str(n_frame))
135
 
136
  for i in frames_list[0:int(n_frame)]:
137
+ controlnet_img = controlnet(i, prompt,control_task, seed_in, ddim_steps, scale)
138
  #images = controlnet_img[0]
139
  #rgb_im = images[0].convert("RGB")
140
 
 
205
  share_button = gr.Button("Share to community", elem_id="share-btn")
206
  with gr.Column():
207
  #status = gr.Textbox()
208
+ prompt = gr.Textbox(label="Prompt", placeholder="enter prompt", show_label=True, elem_id="prompt-in")
209
  control_task = gr.Dropdown(label="Control Task", choices=["Canny", "Depth", "Pose"], value="Pose", multiselect=False)
210
  with gr.Row():
211
  seed_inp = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, value=123456)
212
  trim_in = gr.Slider(label="Cut video at (s)", minimum=1, maximum=5, step=1, value=1)
213
+ ddim_steps = gr.Slider(label='Steps',
214
+ minimum=1,
215
+ maximum=100,
216
+ value=20,
217
+ step=1)
218
+ scale = gr.Slider(label='Guidance Scale',
219
+ minimum=0.1,
220
+ maximum=30.0,
221
+ value=9.0,
222
+ step=0.1)
223
  gr.HTML("""
224
  <a style="display:inline-block" href="https://huggingface.co/spaces/fffiloni/Pix2Pix-Video?duplicate=true"><img src="https://img.shields.io/badge/-Duplicate%20Space-blue?labelColor=white&style=flat&logo=data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAAXNSR0IArs4c6QAAAP5JREFUOE+lk7FqAkEURY+ltunEgFXS2sZGIbXfEPdLlnxJyDdYB62sbbUKpLbVNhyYFzbrrA74YJlh9r079973psed0cvUD4A+4HoCjsA85X0Dfn/RBLBgBDxnQPfAEJgBY+A9gALA4tcbamSzS4xq4FOQAJgCDwV2CPKV8tZAJcAjMMkUe1vX+U+SMhfAJEHasQIWmXNN3abzDwHUrgcRGmYcgKe0bxrblHEB4E/pndMazNpSZGcsZdBlYJcEL9Afo75molJyM2FxmPgmgPqlWNLGfwZGG6UiyEvLzHYDmoPkDDiNm9JR9uboiONcBXrpY1qmgs21x1QwyZcpvxt9NS09PlsPAAAAAElFTkSuQmCC&logoWidth=14" alt="Duplicate Space"></a>
225
  work with longer videos / skip the queue:
 
228
 
229
 
230
 
231
+ inputs = [prompt,video_inp,control_task, seed_inp, trim_in, ddim_steps, scale]
232
  outputs = [video_out, share_group]
233
  #outputs = [status]
234