Update app.py
app.py CHANGED
@@ -277,7 +277,7 @@ def run_grounded_sam(input_image, text_prompt, task_type, background_prompt, bac
     #return [(com_img, 'composite with background'), (green_img, 'green screen'), (alpha_rgb, 'alpha matte')]
     return alpha_rgb
 
-def infer(video_in, trim_value, text_prompt, task_type, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode):
+def infer(video_in, trim_value, text_prompt, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode):
     print(prompt)
     break_vid = get_frames(video_in)
 
@@ -299,8 +299,8 @@ def infer(video_in, trim_value, text_prompt, task_type, background_prompt, backg
     image_array = np.array(to_numpy_i)
 
 
-    matte_img = run_grounded_sam(image_array, text_prompt, task_type, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode)
-    #print(pix2pix_img)
+    matte_img = run_grounded_sam(image_array, text_prompt, "text", background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode)
+    #print(pix2pix_img)u
     #image = Image.open(pix2pix_img)
     #rgb_im = image.convert("RGB")
 
@@ -357,7 +357,7 @@ if __name__ == "__main__":
     video_in = gr.Video(source='upload', type="filepath")
     trim_in = gr.Slider(label="Cut video at (s)", minimun=1, maximum=5, step=1, value=1)
     #task_type = gr.Dropdown(["scribble_point", "scribble_box", "text"], value="text", label="Prompt type")
-    task_type = "text"
+    #task_type = "text"
     text_prompt = gr.Textbox(label="Text prompt", placeholder="the girl in the middle")
     background_type = gr.Dropdown(["generated_by_text", "real_world_sample"], value="generated_by_text", label="Background type")
     background_prompt = gr.Textbox(label="Background prompt", placeholder="downtown area in New York")
@@ -387,7 +387,7 @@ if __name__ == "__main__":
     video_out = gr.Video()
 
     run_button.click(fn=infer, inputs=[
-        video_in, trim_in, text_prompt, task_type, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode], outputs=video_out)
+        video_in, trim_in, text_prompt, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode], outputs=video_out)
 
     block.launch(debug=args.debug, share=args.share, show_error=True)
     #block.queue(concurrency_count=100)
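For readers unfamiliar with the wiring this commit fixes: Gradio binds each entry of the `inputs` list positionally to the callback's parameters, and every entry must be a component object. Before this change, `task_type = "text"` was a plain string threaded through `infer` and the `inputs` list; the commit comments the string out, drops the parameter from `infer`, and hardcodes `"text"` at the `run_grounded_sam` call site instead. Below is a minimal, self-contained sketch of the same click-wiring pattern, not code from this repo: the `greet` callback and its components are hypothetical, and the sketch spells the Slider keyword `minimum` (the `minimun=1` in the unchanged context line at 358 is a typo carried in the app itself).

import gradio as gr

# Every entry in `inputs` must be a Gradio component; each is bound
# positionally to a callback parameter. Constants such as the old
# task_type = "text" belong inside the callback or at the call site,
# not in the inputs list.
def greet(name, times):
    # `name` arrives from the Textbox, `times` from the Slider.
    return ", ".join(["Hello " + name] * int(times))

with gr.Blocks() as block:
    name_in = gr.Textbox(label="Name")
    times_in = gr.Slider(label="Repetitions", minimum=1, maximum=5, step=1, value=1)
    greet_out = gr.Textbox(label="Greeting")
    run_button = gr.Button("Run")
    # Two components in, one component out -- the same shape as the
    # corrected run_button.click(...) call in this commit.
    run_button.click(fn=greet, inputs=[name_in, times_in], outputs=greet_out)

if __name__ == "__main__":
    block.launch()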