Ming Li committed on
Commit
af5481e
1 Parent(s): e96df59

change default img_num to 2

Browse files
Files changed (8) hide show
  1. app.py +1 -1
  2. app_canny.py +1 -1
  3. app_depth.py +1 -1
  4. app_lineart.py +1 -1
  5. app_openpose.py +1 -1
  6. app_segmentation.py +1 -1
  7. app_softedge.py +1 -1
  8. settings.py +1 -1
app.py CHANGED
@@ -13,7 +13,7 @@ from app_softedge import create_demo as create_demo_softedge
13
  from model import Model
14
  from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON
15
 
16
- DESCRIPTION = "# ControlNet++: Improving Conditional Controls with Efficient Consistency Feedback<p>https://arxiv.org/abs/2404.07987</p>"
17
 
18
  if not torch.cuda.is_available():
19
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
13
  from model import Model
14
  from settings import ALLOW_CHANGING_BASE_MODEL, DEFAULT_MODEL_ID, SHOW_DUPLICATE_BUTTON
15
 
16
+ DESCRIPTION = "# [ControlNet++: Improving Conditional Controls with Efficient Consistency Feedback](https://arxiv.org/abs/2404.07987)"
17
 
18
  if not torch.cuda.is_available():
19
  DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
app_canny.py CHANGED
@@ -46,7 +46,7 @@ def create_demo(process):
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
- result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
 
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
app_depth.py CHANGED
@@ -46,7 +46,7 @@ def create_demo(process):
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
- result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
 
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
app_lineart.py CHANGED
@@ -56,7 +56,7 @@ def create_demo(process):
56
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
57
  )
58
  with gr.Column():
59
- result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
60
  inputs = [
61
  image,
62
  prompt,
 
56
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
57
  )
58
  with gr.Column():
59
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
60
  inputs = [
61
  image,
62
  prompt,
app_openpose.py CHANGED
@@ -46,7 +46,7 @@ def create_demo(process):
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
- result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
 
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
app_segmentation.py CHANGED
@@ -46,7 +46,7 @@ def create_demo(process):
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
- result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
 
46
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
47
  )
48
  with gr.Column():
49
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
50
  inputs = [
51
  image,
52
  prompt,
app_softedge.py CHANGED
@@ -55,7 +55,7 @@ def create_demo(process):
55
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
56
  )
57
  with gr.Column():
58
- result = gr.Gallery(label="Output", show_label=False, columns=4, object_fit="scale-down")
59
  inputs = [
60
  image,
61
  prompt,
 
55
  value="longbody, lowres, bad anatomy, bad hands, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality",
56
  )
57
  with gr.Column():
58
+ result = gr.Gallery(label="Output", show_label=False, columns=2, object_fit="scale-down")
59
  inputs = [
60
  image,
61
  prompt,
settings.py CHANGED
@@ -5,7 +5,7 @@ import numpy as np
5
  DEFAULT_MODEL_ID = os.getenv("DEFAULT_MODEL_ID", "runwayml/stable-diffusion-v1-5")
6
 
7
  MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "4"))
8
- DEFAULT_NUM_IMAGES = min(MAX_NUM_IMAGES, int(os.getenv("DEFAULT_NUM_IMAGES", "4")))
9
  MAX_IMAGE_RESOLUTION = int(os.getenv("MAX_IMAGE_RESOLUTION", "768"))
10
  DEFAULT_IMAGE_RESOLUTION = min(MAX_IMAGE_RESOLUTION, int(os.getenv("DEFAULT_IMAGE_RESOLUTION", "512")))
11
 
 
5
  DEFAULT_MODEL_ID = os.getenv("DEFAULT_MODEL_ID", "runwayml/stable-diffusion-v1-5")
6
 
7
  MAX_NUM_IMAGES = int(os.getenv("MAX_NUM_IMAGES", "4"))
8
+ DEFAULT_NUM_IMAGES = min(MAX_NUM_IMAGES, int(os.getenv("DEFAULT_NUM_IMAGES", "2")))
9
  MAX_IMAGE_RESOLUTION = int(os.getenv("MAX_IMAGE_RESOLUTION", "768"))
10
  DEFAULT_IMAGE_RESOLUTION = min(MAX_IMAGE_RESOLUTION, int(os.getenv("DEFAULT_IMAGE_RESOLUTION", "512")))
11