Update app.py
app.py
CHANGED
@@ -274,7 +274,8 @@ def run_grounded_sam(input_image, text_prompt, task_type, background_prompt, bac
     ### com img with green screen
     green_img = alpha_pred[..., None] * image_ori + (1 - alpha_pred[..., None]) * np.array([PALETTE_back], dtype='uint8')
     green_img = np.uint8(green_img)
-    return [(com_img, 'composite with background'), (green_img, 'green screen'), (alpha_rgb, 'alpha matte')]
+    #return [(com_img, 'composite with background'), (green_img, 'green screen'), (alpha_rgb, 'alpha matte')]
+    return alpha_rgb
 
 def infer(prompt,video_in, trim_value):
     print(prompt)
@@ -354,11 +355,13 @@ if __name__ == "__main__":
         with gr.Row():
             with gr.Column():
                 video_in = gr.Video(source='upload', type="filepath")
+                trim_in = gr.Slider(label="Cut video at (s)", minimun=1, maximum=5, step=1, value=1
                 #task_type = gr.Dropdown(["scribble_point", "scribble_box", "text"], value="text", label="Prompt type")
                 task_type = "text"
                 text_prompt = gr.Textbox(label="Text prompt", placeholder="the girl in the middle")
                 background_type = gr.Dropdown(["generated_by_text", "real_world_sample"], value="generated_by_text", label="Background type")
                 background_prompt = gr.Textbox(label="Background prompt", placeholder="downtown area in New York")
+
                 run_button = gr.Button(label="Run")
                 with gr.Accordion("Advanced options", open=False):
                     box_threshold = gr.Slider(
@@ -378,12 +381,13 @@ if __name__ == "__main__":
                     )
 
             with gr.Column():
-                gallery = gr.Gallery(
-                    label="Generated images", show_label=True, elem_id="gallery"
-                ).style(preview=True, grid=3, object_fit="scale-down")
+                #gallery = gr.Gallery(
+                #    label="Generated images", show_label=True, elem_id="gallery"
+                #).style(preview=True, grid=3, object_fit="scale-down")
+                video_out = gr.Video()
 
-        run_button.click(fn=
-
+        run_button.click(fn=infer, inputs=[
+            video_in, trim_in, text_prompt, task_type, background_prompt, background_type, box_threshold, text_threshold, iou_threshold, scribble_mode, guidance_mode], outputs=video_out)
 
         block.launch(debug=args.debug, share=args.share, show_error=True)
         #block.queue(concurrency_count=100)