mattyamonaca committed on
Commit
134c8c2
1 Parent(s): 769989d
Files changed (3) hide show
  1. app.py +10 -10
  2. ipadapter/__put_your_lineart_model +0 -0
  3. sd_model.py +28 -17
app.py CHANGED
@@ -11,9 +11,6 @@ import os
11
  import numpy as np
12
  from PIL import Image
13
  import zipfile
14
- import torch
15
-
16
- zero = torch.Tensor([0]).cuda()
17
 
18
  path = os.getcwd()
19
  output_dir = f"{path}/output"
@@ -45,7 +42,7 @@ class webui:
45
  def __init__(self):
46
  self.demo = gr.Blocks()
47
 
48
- def undercoat(self, input_image, pos_prompt, neg_prompt, alpha_th, thickness):
49
  org_line_image = input_image
50
  image = pil2cv(input_image)
51
  image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
@@ -54,18 +51,19 @@ class webui:
54
  image[index] = [255, 255, 255, 255]
55
  input_image = cv2pil(image)
56
 
57
- pipe = get_cn_pipeline()
58
  detectors = get_cn_detector(input_image.resize((1024, 1024), Image.ANTIALIAS))
59
 
60
 
61
- gen_image = generate(pipe, detectors, pos_prompt, neg_prompt)
62
  color_img, unfinished = process(gen_image.resize((image.shape[1], image.shape[0]), Image.ANTIALIAS) , org_line_image, alpha_th, thickness)
63
  #color_img = color_img.resize((image.shape[1], image.shape[0]) , Image.ANTIALIAS)
64
 
65
 
66
  output_img = Image.alpha_composite(color_img, org_line_image)
67
  name = randomname(10)
68
- os.makedirs(f"{output_dir}")
 
69
  os.makedirs(f"{output_dir}/{name}")
70
  output_img.save(f"{output_dir}/{name}/output_image.png")
71
  org_line_image.save(f"{output_dir}/{name}/line_image.png")
@@ -84,13 +82,15 @@ class webui:
84
  with self.demo:
85
  with gr.Row():
86
  with gr.Column():
87
- input_image = gr.Image(type="pil", image_mode="RGBA")
88
-
89
  pos_prompt = gr.Textbox(value="1girl, blue hair, pink shirts, bestquality, 4K", max_lines=1000, label="positive prompt")
90
  neg_prompt = gr.Textbox(value=" (worst quality, low quality:1.2), (lowres:1.2), (bad anatomy:1.2), (greyscale, monochrome:1.4)", max_lines=1000, label="negative prompt")
91
 
92
  alpha_th = gr.Slider(maximum = 255, value=100, label = "alpha threshold")
93
  thickness = gr.Number(value=5, label="Thickness of correction area (Odd numbers need to be entered)")
 
 
 
94
  #gr.Slider(maximum = 21, value=3, step=2, label = "Thickness of correction area")
95
 
96
  submit = gr.Button(value="Start")
@@ -101,7 +101,7 @@ class webui:
101
  output_file = gr.File()
102
  submit.click(
103
  self.undercoat,
104
- inputs=[input_image, pos_prompt, neg_prompt, alpha_th, thickness],
105
  outputs=[output_0, output_file]
106
  )
107
 
 
11
  import numpy as np
12
  from PIL import Image
13
  import zipfile
 
 
 
14
 
15
  path = os.getcwd()
16
  output_dir = f"{path}/output"
 
42
  def __init__(self):
43
  self.demo = gr.Blocks()
44
 
45
+ def undercoat(self, input_image, pos_prompt, neg_prompt, alpha_th, thickness, reference_flg, reference_img):
46
  org_line_image = input_image
47
  image = pil2cv(input_image)
48
  image = cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA)
 
51
  image[index] = [255, 255, 255, 255]
52
  input_image = cv2pil(image)
53
 
54
+ pipe = get_cn_pipeline(reference_flg)
55
  detectors = get_cn_detector(input_image.resize((1024, 1024), Image.ANTIALIAS))
56
 
57
 
58
+ gen_image = generate(pipe, detectors, pos_prompt, neg_prompt, reference_flg, reference_img)
59
  color_img, unfinished = process(gen_image.resize((image.shape[1], image.shape[0]), Image.ANTIALIAS) , org_line_image, alpha_th, thickness)
60
  #color_img = color_img.resize((image.shape[1], image.shape[0]) , Image.ANTIALIAS)
61
 
62
 
63
  output_img = Image.alpha_composite(color_img, org_line_image)
64
  name = randomname(10)
65
+ if not os.path.exists(f"{output_dir}"):
66
+ os.makedirs(f"{output_dir}")
67
  os.makedirs(f"{output_dir}/{name}")
68
  output_img.save(f"{output_dir}/{name}/output_image.png")
69
  org_line_image.save(f"{output_dir}/{name}/line_image.png")
 
82
  with self.demo:
83
  with gr.Row():
84
  with gr.Column():
85
+ input_image = gr.Image(type="pil", image_mode="RGBA", label="lineart")
 
86
  pos_prompt = gr.Textbox(value="1girl, blue hair, pink shirts, bestquality, 4K", max_lines=1000, label="positive prompt")
87
  neg_prompt = gr.Textbox(value=" (worst quality, low quality:1.2), (lowres:1.2), (bad anatomy:1.2), (greyscale, monochrome:1.4)", max_lines=1000, label="negative prompt")
88
 
89
  alpha_th = gr.Slider(maximum = 255, value=100, label = "alpha threshold")
90
  thickness = gr.Number(value=5, label="Thickness of correction area (Odd numbers need to be entered)")
91
+
92
+ reference_image = gr.Image(type="pil", image_mode="RGB", label="reference_image")
93
+ reference_flg = gr.Checkbox(value=True, label="reference_flg")
94
  #gr.Slider(maximum = 21, value=3, step=2, label = "Thickness of correction area")
95
 
96
  submit = gr.Button(value="Start")
 
101
  output_file = gr.File()
102
  submit.click(
103
  self.undercoat,
104
+ inputs=[input_image, pos_prompt, neg_prompt, alpha_th, thickness, reference_image, reference_flg],
105
  outputs=[output_0, output_file]
106
  )
107
 
ipadapter/__put_your_lineart_model ADDED
File without changes
sd_model.py CHANGED
@@ -6,7 +6,7 @@ import spaces
6
 
7
  device = "cuda"
8
 
9
- def get_cn_pipeline():
10
  controlnets = [
11
  ControlNetModel.from_pretrained("./controlnet/lineart", torch_dtype=torch.float16, use_safetensors=True),
12
  ControlNetModel.from_pretrained("mattyamonaca/controlnet_line2line_xl", torch_dtype=torch.float16)
@@ -17,13 +17,12 @@ def get_cn_pipeline():
17
  "cagliostrolab/animagine-xl-3.1", controlnet=controlnets, vae=vae, torch_dtype=torch.float16
18
  )
19
 
20
- #pipe.enable_model_cpu_offload()
21
-
22
- #if pipe.safety_checker is not None:
23
- # pipe.safety_checker = lambda images, **kwargs: (images, [False])
24
-
25
- #pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
26
- #pipe.to(device)
27
 
28
  return pipe
29
 
@@ -51,18 +50,30 @@ def get_cn_detector(image):
51
  return detectors
52
 
53
  @spaces.GPU
54
- def generate(pipe, detectors, prompt, negative_prompt):
55
  pipe.to("cuda")
56
  default_pos = ""
57
  default_neg = ""
58
  prompt = default_pos + prompt
59
  negative_prompt = default_neg + negative_prompt
60
- print(type(pipe))
61
- image = pipe(
62
- prompt=prompt,
63
- negative_prompt = negative_prompt,
64
- image=detectors,
65
- num_inference_steps=50,
66
- controlnet_conditioning_scale=[1.0, 0.2],
67
- ).images[0]
 
 
 
 
 
 
 
 
 
 
 
 
68
  return image
 
6
 
7
  device = "cuda"
8
 
9
+ def get_cn_pipeline(reference_flg):
10
  controlnets = [
11
  ControlNetModel.from_pretrained("./controlnet/lineart", torch_dtype=torch.float16, use_safetensors=True),
12
  ControlNetModel.from_pretrained("mattyamonaca/controlnet_line2line_xl", torch_dtype=torch.float16)
 
17
  "cagliostrolab/animagine-xl-3.1", controlnet=controlnets, vae=vae, torch_dtype=torch.float16
18
  )
19
 
20
+ if reference_flg == True:
21
+ pipe.load_ip_adapter(
22
+ "h94/IP-Adapter",
23
+ subfolder="sdxl_models",
24
+ weight_name="ip-adapter-plus_sdxl_vit-h.bin"
25
+ )
 
26
 
27
  return pipe
28
 
 
50
  return detectors
51
 
52
  @spaces.GPU
53
+ def generate(pipe, detectors, prompt, negative_prompt, reference_flg=False, reference_img=None):
54
  pipe.to("cuda")
55
  default_pos = ""
56
  default_neg = ""
57
  prompt = default_pos + prompt
58
  negative_prompt = default_neg + negative_prompt
59
+
60
+
61
+ if reference_flg==False:
62
+ image = pipe(
63
+ prompt=prompt,
64
+ negative_prompt = negative_prompt,
65
+ image=detectors,
66
+ num_inference_steps=50,
67
+ controlnet_conditioning_scale=[1.0, 0.2],
68
+ ).images[0]
69
+ else:
70
+ image = pipe(
71
+ prompt=prompt,
72
+ negative_prompt = negative_prompt,
73
+ image=detectors,
74
+ num_inference_steps=50,
75
+ controlnet_conditioning_scale=[1.0, 0.2],
76
+ ip_adapter_image=reference_img,
77
+ ).images[0]
78
+
79
  return image