fi4cr committed
Commit 7556db3
1 Parent(s): 51f5693
Files changed (2)
  1. app.py +34 -0
  2. requirements.txt +7 -0
app.py ADDED
@@ -0,0 +1,34 @@
+ import gradio as gr
+
+ def infer_segmentation(prompt, negative_prompt, image):
+     # your inference function for segmentation control
+     return image  # placeholder: echo the input until the model call is wired up
+
+ def infer_canny(prompt, negative_prompt, image):
+     # your inference function for canny control
+     return image  # placeholder: echo the input until the model call is wired up
+
+ with gr.Blocks(theme='gradio/soft') as demo:
+     gr.Markdown("## ControlNet for animal pose")
+     gr.Markdown("Under construction")
+
+
+     with gr.Tab("ControlNet for dogs"):
+         prompt_input_canny = gr.Textbox(label="Prompt")
+         negative_prompt_canny = gr.Textbox(label="Negative Prompt")
+         canny_input = gr.Image(label="Input Image")
+         canny_output = gr.Image(label="Output Image")
+         submit_btn = gr.Button(value="Submit")
+         canny_inputs = [prompt_input_canny, negative_prompt_canny, canny_input]
+         submit_btn.click(fn=infer_canny, inputs=canny_inputs, outputs=[canny_output])
+
+     with gr.Tab("ControlNet for cats and dogs"):
+         prompt_input_seg = gr.Textbox(label="Prompt")
+         negative_prompt_seg = gr.Textbox(label="Negative Prompt")
+         seg_input = gr.Image(label="Image")
+         seg_output = gr.Image(label="Output Image")
+         submit_btn = gr.Button(value="Submit")
+         seg_inputs = [prompt_input_seg, negative_prompt_seg, seg_input]
+         submit_btn.click(fn=infer_segmentation, inputs=seg_inputs, outputs=[seg_output])
+
+ demo.launch()
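
Both infer_* functions are committed as stubs. Given the jax, flax, and diffusers pins in requirements.txt below, a minimal sketch of how the canny branch could be filled in follows; the checkpoint names (lllyasviel/sd-controlnet-canny, runwayml/stable-diffusion-v1-5), the Canny thresholds, and the step count are illustrative assumptions, not part of this commit.

import cv2
import jax
import jax.numpy as jnp
import numpy as np
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from PIL import Image

# Assumed checkpoints: swap in the animal-pose ControlNet this Space is meant to serve.
controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.float32
)
pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, revision="flax", dtype=jnp.float32
)
params["controlnet"] = controlnet_params

def infer_canny(prompt, negative_prompt, image):
    # Build a 3-channel canny conditioning image from the uploaded photo.
    edges = cv2.Canny(np.asarray(image), 100, 200)
    canny_image = Image.fromarray(np.stack([edges] * 3, axis=-1))

    num_samples = jax.device_count()
    rng = jax.random.split(jax.random.PRNGKey(0), num_samples)

    # Tokenize prompts, preprocess the conditioning image, and shard everything across devices.
    prompt_ids = shard(pipe.prepare_text_inputs([prompt] * num_samples))
    neg_prompt_ids = shard(pipe.prepare_text_inputs([negative_prompt] * num_samples))
    processed_image = shard(pipe.prepare_image_inputs([canny_image] * num_samples))

    output = pipe(
        prompt_ids=prompt_ids,
        image=processed_image,
        params=replicate(params),
        prng_seed=rng,
        num_inference_steps=50,
        neg_prompt_ids=neg_prompt_ids,
        jit=True,
    ).images

    # Collapse the (devices, batch) leading axes and convert to PIL for the Gradio output.
    images = pipe.numpy_to_pil(np.asarray(output.reshape((num_samples,) + output.shape[-3:])))
    return images[0]
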
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html
+ jax[cuda11_cudnn805]
+ jaxlib
+ git+https://github.com/huggingface/diffusers@main
+ opencv-python
+ transformers
+ flax
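
The -f line adds Google's JAX release index as an extra wheel source, which is what lets pip resolve the CUDA 11 / cuDNN 8.0.5 jaxlib build requested by jax[cuda11_cudnn805]; on a machine with a matching NVIDIA driver, the stack installs with pip install -r requirements.txt.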