Ming Li committed
Commit da031b9
Parent: de3d9dc
.vscode/settings.json CHANGED
@@ -3,7 +3,7 @@
     "editor.defaultFormatter": "ms-python.black-formatter",
     "editor.formatOnType": true,
     "editor.codeActionsOnSave": {
-      "source.organizeImports": true
+      "source.organizeImports": "explicit"
     }
   },
   "black-formatter.args": [
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: ControlNet V1.1
+title: ControlNet++
 emoji: 📉
 colorFrom: yellow
 colorTo: green
app.py CHANGED
@@ -20,7 +20,7 @@ from app_softedge import create_demo as create_demo_softedge
 from model import Model
 from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON

-DESCRIPTION = "# ControlNet v1.1"
+DESCRIPTION = "# ControlNet++: Improving Conditional Controls with Efficient Consistency Feedback<p>https://arxiv.org/abs/2404.07987</p>"

 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
@@ -38,28 +38,14 @@ with gr.Blocks(css="style.css") as demo:
     with gr.Tabs():
         with gr.TabItem("Canny"):
             create_demo_canny(model.process_canny)
-        with gr.TabItem("MLSD"):
-            create_demo_mlsd(model.process_mlsd)
-        with gr.TabItem("Scribble"):
-            create_demo_scribble(model.process_scribble)
-        with gr.TabItem("Scribble Interactive"):
-            create_demo_scribble_interactive(model.process_scribble_interactive)
         with gr.TabItem("SoftEdge"):
             create_demo_softedge(model.process_softedge)
-        with gr.TabItem("OpenPose"):
-            create_demo_openpose(model.process_openpose)
         with gr.TabItem("Segmentation"):
             create_demo_segmentation(model.process_segmentation)
         with gr.TabItem("Depth"):
             create_demo_depth(model.process_depth)
-        with gr.TabItem("Normal map"):
-            create_demo_normal(model.process_normal)
         with gr.TabItem("Lineart"):
             create_demo_lineart(model.process_lineart)
-        with gr.TabItem("Content Shuffle"):
-            create_demo_shuffle(model.process_shuffle)
-        with gr.TabItem("Instruct Pix2Pix"):
-            create_demo_ip2p(model.process_ip2p)

     with gr.Accordion(label="Base model", open=False):
         with gr.Row():
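
Each remaining tab binds a create_demo_* UI factory to the matching Model.process_* callback; the MLSD, Scribble, OpenPose, Normal map, Content Shuffle, and Instruct Pix2Pix tabs are dropped from the demo. For context, a minimal sketch of that factory pattern (a hypothetical simplification, not code from this commit):

    import gradio as gr

    # Sketch of the create_demo(process) pattern used by each app_*.py module:
    # build the tab's UI and bind `process` as the click callback.
    def create_demo(process):
        with gr.Blocks() as demo:
            image = gr.Image(label="Input")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button("Run")
            result = gr.Gallery(label="Output", columns=4)
            run_button.click(fn=process, inputs=[image, prompt], outputs=result)
        return demo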
app_canny.py CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     label="Canny high threshold", minimum=1, maximum=255, value=200, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -46,7 +46,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
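
The guidance-scale default drops from 9.0 to 7.5, the common Stable Diffusion 1.5 setting, and the output gallery widens from 2 to 4 columns; the same two changes repeat in every app_*.py below. The slider value is the classifier-free guidance weight passed to the underlying pipeline. A minimal sketch of how such a value is typically consumed, assuming the demo wraps diffusers' StableDiffusionControlNetPipeline and using the public v1.1 Canny checkpoint as a stand-in for the local paths introduced in model.py below:

    import torch
    from PIL import Image
    from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

    controlnet = ControlNetModel.from_pretrained(
        "lllyasviel/control_v11p_sd15_canny", torch_dtype=torch.float16
    )
    pipe = StableDiffusionControlNetPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
    ).to("cuda")

    canny_image = Image.open("canny_edges.png")  # placeholder edge map from the Canny preprocessor
    images = pipe(
        "a house in a forest, best quality, extremely detailed",
        image=canny_image,
        num_inference_steps=20,  # "Number of steps" slider default
        guidance_scale=7.5,      # new "Guidance scale" slider default
    ).images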
app_depth.py CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=384, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -46,7 +46,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_ip2p.py CHANGED
@@ -31,7 +31,7 @@ def create_demo(process):
                     step=256,
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -40,7 +40,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_lineart.py CHANGED
@@ -31,7 +31,7 @@ def create_demo(process):
                     ],
                     type="value",
                     value="Lineart",
-                    info='Note that "Lineart (anime)" and "None (anime)" are for anime base models like Anything-v3.',
+                    info='Please note that the only difference here is pre-processing. We use exactly the same ControlNet++ model to generate images.',
                 )
                 num_samples = gr.Slider(
                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
@@ -47,7 +47,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -56,7 +56,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
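
The new info string tells users that the radio choices only affect pre-processing; the same ControlNet++ lineart checkpoint generates the images either way. A minimal sketch of those preprocessors, assuming the controlnet_aux annotators such demos typically use:

    from PIL import Image
    from controlnet_aux import LineartAnimeDetector, LineartDetector

    input_image = Image.open("input.png")  # placeholder input photo
    lineart = LineartDetector.from_pretrained("lllyasviel/Annotators")
    lineart_anime = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
    control_image = lineart(input_image)   # or lineart_anime(input_image) for anime-style inputs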
app_mlsd.py CHANGED
@@ -40,7 +40,7 @@ def create_demo(process):
                     label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -49,7 +49,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_normal.py CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=384, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -46,7 +46,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_openpose.py CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -46,7 +46,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_scribble.py CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -46,7 +46,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_scribble_interactive.py CHANGED
@@ -51,7 +51,7 @@ def create_demo(process):
                     step=256,
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -60,7 +60,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")

         create_button.click(
             fn=create_canvas,
app_segmentation.py CHANGED
@@ -37,7 +37,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -46,7 +46,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_shuffle.py CHANGED
@@ -34,7 +34,7 @@ def create_demo(process):
                     step=256,
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -43,7 +43,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
app_softedge.py CHANGED
@@ -30,7 +30,7 @@ def create_demo(process):
                         "None",
                     ],
                     type="value",
-                    value="PidiNet",
+                    value="HED",
                 )
                 num_samples = gr.Slider(
                     label="Number of images", minimum=1, maximum=MAX_NUM_IMAGES, value=DEFAULT_NUM_IMAGES, step=1
@@ -46,7 +46,7 @@ def create_demo(process):
                     label="Preprocess resolution", minimum=128, maximum=512, value=512, step=1
                 )
                 num_steps = gr.Slider(label="Number of steps", minimum=1, maximum=100, value=20, step=1)
-                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
+                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=30.0, value=7.5, step=0.1)
                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
                 randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
                 a_prompt = gr.Textbox(label="Additional prompt", value="best quality, extremely detailed")
@@ -55,7 +55,7 @@ def create_demo(process):
                     value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
                 )
             with gr.Column():
-                result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
+                result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
         inputs = [
             image,
             prompt,
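
The default soft-edge preprocessor switches from PidiNet to HED, matching the HED-conditioned checkpoint registered under "softedge" in model.py below (MultiGen20M_Hed). A short sketch of producing a HED control map, again assuming controlnet_aux:

    from PIL import Image
    from controlnet_aux import HEDdetector

    hed = HEDdetector.from_pretrained("lllyasviel/Annotators")
    soft_edge_map = hed(Image.open("input.png"))  # placeholder input photo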
model.py CHANGED
@@ -18,19 +18,15 @@ from preprocessor import Preprocessor
 from settings import MAX_IMAGE_RESOLUTION, MAX_NUM_IMAGES

 CONTROLNET_MODEL_IDS = {
-    "Openpose": "lllyasviel/control_v11p_sd15_openpose",
-    "Canny": "lllyasviel/control_v11p_sd15_canny",
-    "MLSD": "lllyasviel/control_v11p_sd15_mlsd",
-    "scribble": "lllyasviel/control_v11p_sd15_scribble",
-    "softedge": "lllyasviel/control_v11p_sd15_softedge",
-    "segmentation": "lllyasviel/control_v11p_sd15_seg",
-    "depth": "lllyasviel/control_v11f1p_sd15_depth",
-    "NormalBae": "lllyasviel/control_v11p_sd15_normalbae",
-    "lineart": "lllyasviel/control_v11p_sd15_lineart",
-    "lineart_anime": "lllyasviel/control_v11p_sd15s2_lineart_anime",
-    "shuffle": "lllyasviel/control_v11e_sd15_shuffle",
-    "ip2p": "lllyasviel/control_v11e_sd15_ip2p",
-    "inpaint": "lllyasviel/control_v11e_sd15_inpaint",
+    "Canny": "../diffusers/work_dirs/reward_model/MultiGen20M_Canny/reward_ft5k_canny_res512_bs256_lr1e-5_warmup100_scale-10_iter10k_fp16_train0-1k_reward0-200_denormalized-img_gradients-with-threshold0.05-mse-loss/checkpoint-10000/controlnet",
+
+    "softedge": "../diffusers/work_dirs/reward_model/MultiGen20M_Hed/reward_ft5k_controlnet_sd15_hed_res512_bs256_lr1e-5_warmup100_scale-1_iter10k_fp16_train0-1k_reward0-200/checkpoint-10000/controlnet",
+
+    "segmentation": "../diffusers/work_dirs/reward_model/Captioned_ADE20K/reward_ft_controlnet_sd15_seg_res512_bs256_lr1e-5_warmup100_scale-0.5_iter5k_fp16_train0-1k_reward0-200_FCN-R101-d8/checkpoint-5000/controlnet",
+
+    "depth": "../diffusers/work_dirs/reward_model/MultiGen20M_Depth/reward_ft5k_controlnet_sd15_depth_res512_bs256_lr1e-5_warmup100_scale-1.0_iter10k_fp16_train0-1k_reward0-200_mse-loss/checkpoint-10000/controlnet",
+
+    "lineart": "../diffusers/work_dirs/reward_model/MultiGen20M_LineDrawing/reward_ft5k_controlnet_sd15_lineart_res512_bs256_lr1e-5_warmup100_scale-10_iter10k_fp16_train0-1k_reward0-200/checkpoint-10000/controlnet",
 }


@@ -580,6 +576,7 @@ class Model:
             image_resolution=image_resolution,
             detect_resolution=preprocess_resolution,
         )
+        # NOTE: We still use the general lineart model
         if "anime" in preprocessor_name:
             self.load_controlnet_weight("lineart_anime")
         else:
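
CONTROLNET_MODEL_IDS now maps each remaining task to a local reward-fine-tuned ControlNet++ checkpoint directory instead of an lllyasviel hub id; ControlNetModel.from_pretrained accepts either form. The Model.load_controlnet_weight helper that consumes this mapping is not shown in the diff; a minimal sketch of how such a mapping is typically used:

    import torch
    from diffusers import ControlNetModel

    # Hedged sketch; the real Model.load_controlnet_weight is not part of this commit.
    def load_controlnet_weight(pipe, task_name: str) -> None:
        model_id = CONTROLNET_MODEL_IDS[task_name]  # hub id or local ".../controlnet" directory
        controlnet = ControlNetModel.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.controlnet = controlnet.to(pipe.device)  # swap the ControlNet on the live pipeline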