# animal_pose/app.py
import gradio as gr


def infer_segmentation(prompt, negative_prompt, image):
    # Your inference function for segmentation control goes here.
    # Placeholder until the model is wired in: echo the input image back.
    return image


def infer_canny(prompt, negative_prompt, image):
    # Your inference function for canny control goes here.
    # Placeholder until the model is wired in: echo the input image back.
    return image
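

# A minimal sketch (not wired into the UI) of what a canny-conditioned inference
# function could look like using diffusers. The model IDs, thresholds, step count,
# and the assumption of a CUDA GPU are illustrative only, not part of this Space;
# the segmentation tab would follow the same pattern with a segmentation control
# image and a matching ControlNet checkpoint.
def example_infer_canny(prompt, negative_prompt, image):
    import cv2
    import numpy as np
    import torch
    from PIL import Image
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

    # Extract canny edges from the input image and stack them into an RGB
    # control image, as in the diffusers ControlNet examples.
    edges = cv2.Canny(np.array(image), 100, 200)
    control = Image.fromarray(np.stack([edges] * 3, axis=-1))

    # Load a canny ControlNet on top of a Stable Diffusion base model.
    # In a real app these would be loaded once at startup, not per call.
    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16
    )
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5",
        controlnet=controlnet,
        torch_dtype=torch.float16,
    ).to("cuda")

    return pipe(
        prompt,
        image=control,
        negative_prompt=negative_prompt,
        num_inference_steps=20,
    ).images[0]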
with gr.Blocks(theme='gradio/soft') as demo:
    gr.Markdown("## ControlNet for animal pose")
    gr.Markdown("Under construction")
    with gr.Tab("ControlNet for dogs"):
        prompt_input_canny = gr.Textbox(label="Prompt")
        negative_prompt_canny = gr.Textbox(label="Negative Prompt")
        canny_input = gr.Image(label="Input Image")
        canny_output = gr.Image(label="Output Image")
        submit_btn = gr.Button(value="Submit")
        canny_inputs = [prompt_input_canny, negative_prompt_canny, canny_input]
        submit_btn.click(fn=infer_canny, inputs=canny_inputs, outputs=[canny_output])
    with gr.Tab("ControlNet for cats and dogs"):
        prompt_input_seg = gr.Textbox(label="Prompt")
        negative_prompt_seg = gr.Textbox(label="Negative Prompt")
        seg_input = gr.Image(label="Input Image")
        seg_output = gr.Image(label="Output Image")
        submit_btn = gr.Button(value="Submit")
        seg_inputs = [prompt_input_seg, negative_prompt_seg, seg_input]
        submit_btn.click(fn=infer_segmentation, inputs=seg_inputs, outputs=[seg_output])

demo.launch()