okaris committed
Commit
bae258c
1 Parent(s): d31709b

Release Omni-Zero

Files changed (3)
  1. README.md +2 -1
  2. omni_zero.py +64 -1
  3. utils.py +68 -2
README.md CHANGED
@@ -1,6 +1,7 @@
 # Omni-Zero: A diffusion pipeline for zero-shot stylized portrait creation.
 - [x] Release single person code
 - [ ] Release couples code
+- [ ] Add LoRA support
 
 ## Use Omni-Zero in [fal.ai](https://fal.ai) Workflows [https://fal.ai/dashboard/workflows/okaris/omni-zero](https://fal.ai/dashboard/workflows/okaris/omni-zero)
 ![Omni-Zero](https://github.com/okaris/omni-zero/assets/1448702/2ccbdf24-eb41-4a85-975e-af701fc4a879)
@@ -26,4 +27,4 @@ python demo.py
 - Special thanks to [fal.ai](https://fal.ai) for providing compute for the research and hosting
 - This project wouldn't be possible without the great work of the [InstantX Team](https://github.com/InstantID)
 - Thanks to [@fofrAI](http://twitter.com/fofrAI) for inspiring me with his [face-to-many workflow](https://github.com/fofr/cog-face-to-many)
-- Thanks to Matteo ([@cubiq](https://twitter.com/cubiq])) for creating the ComfyUI nodes for IP-Adapter
+- Thanks to Matteo ([@cubiq](https://twitter.com/cubiq)) for creating the ComfyUI nodes for IP-Adapter
omni_zero.py CHANGED
@@ -57,6 +57,7 @@ class OmniZeroSingle():
         self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++", final_sigmas_type="zero")
 
         self.pipeline.load_ip_adapter(["okaris/ip-adapter-instantid", "h94/IP-Adapter", "h94/IP-Adapter"], subfolder=[None, "sdxl_models", "sdxl_models"], weight_name=["ip-adapter-instantid.bin", "ip-adapter-plus_sdxl_vit-h.safetensors", "ip-adapter-plus_sdxl_vit-h.safetensors"])
+
     def get_largest_face_embedding_and_kps(self, image, target_image=None):
         face_info = self.face_analysis.get(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
         if len(face_info) == 0:
@@ -156,4 +157,66 @@ class OmniZeroSingle():
             seed=seed,
         ).images
 
-        return images
+        return images
+
+class OmniZeroCouple():
+    def __init__(self,
+        base_model="stabilityai/stable-diffusion-xl-base-1.0",
+    ):
+        snapshot_download("okaris/antelopev2", local_dir="./models/antelopev2")
+        self.face_analysis = FaceAnalysis(name='antelopev2', root='./', providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
+        self.face_analysis.prepare(ctx_id=0, det_size=(640, 640))
+
+        dtype = torch.float16
+
+        ip_adapter_plus_image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+            "h94/IP-Adapter",
+            subfolder="models/image_encoder",
+            torch_dtype=dtype,
+        ).to("cuda")
+
+        zoedepthnet_path = "okaris/zoe-depth-controlnet-xl"
+        zoedepthnet = ControlNetModel.from_pretrained(zoedepthnet_path, torch_dtype=dtype).to("cuda")
+
+        identitynet_path = "okaris/face-controlnet-xl"
+        identitynet = ControlNetModel.from_pretrained(identitynet_path, torch_dtype=dtype).to("cuda")
+
+        self.zoe_depth_detector = ZoeDetector.from_pretrained("lllyasviel/Annotators").to("cuda")
+
+        self.pipeline = OmniZeroPipeline.from_pretrained(
+            base_model,
+            controlnet=[identitynet, zoedepthnet],
+            torch_dtype=dtype,
+            image_encoder=ip_adapter_plus_image_encoder,
+        ).to("cuda")
+
+        config = self.pipeline.scheduler.config
+        config["timestep_spacing"] = "trailing"
+        self.pipeline.scheduler = DPMSolverMultistepScheduler.from_config(config, use_karras_sigmas=True, algorithm_type="sde-dpmsolver++", final_sigmas_type="zero")
+
+        self.pipeline.load_ip_adapter(["okaris/ip-adapter-instantid", "okaris/ip-adapter-instantid", "h94/IP-Adapter", "h94/IP-Adapter"], subfolder=[None, None, "sdxl_models", "sdxl_models"], weight_name=["ip-adapter-instantid.bin", "ip-adapter-instantid.bin", "ip-adapter-plus_sdxl_vit-h.safetensors", "ip-adapter-plus_sdxl_vit-h.safetensors"])
+
+    def generate(self,
+        seed=42,
+        prompt="A person",
+        negative_prompt="blurry, out of focus",
+        guidance_scale=3.0,
+        number_of_images=1,
+        number_of_steps=10,
+        base_image=None,
+        base_image_strength=0.15,
+        composition_image=None,
+        composition_image_strength=1.0,
+        style_image=None,
+        style_image_strength=1.0,
+        style_image_2=None,
+        style_image_strength_2=1.0,
+        identity_image=None,
+        identity_image_strength=1.0,
+        identity_image_2=None,
+        identity_image_strength_2=1.0,
+        depth_image=None,
+        depth_image_strength=0.5,
+    ):
+        # Not implemented yet
+        print("Not implemented yet")
utils.py CHANGED
@@ -1,11 +1,24 @@
 import math
 import PIL
+from PIL import Image
 import cv2
 import numpy as np
 
 from diffusers.utils import load_image
 
 def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255)]):
+    """
+    Draw keypoints on an image.
+
+    Args:
+        image_pil (PIL.Image): Image on which to draw the keypoints.
+        kps (list): List of keypoints to draw.
+        color_list (list): List of colors to use for drawing the keypoints.
+
+    Returns:
+        PIL.Image: Image with keypoints drawn on it.
+    """
+
     stickwidth = 4
     limbSeq = np.array([[0, 2], [1, 2], [3, 2], [4, 2]])
     kps = np.array(kps)
@@ -41,8 +54,20 @@ def draw_kps(image_pil, kps, color_list=[(255, 0, 0), (0, 255, 0), (0, 0, 255),
 
 
 def load_and_resize_image(image_path, max_width, max_height, maintain_aspect_ratio=True):
+    """
+    Load and resize an image to fit within the specified dimensions.
+
+    Args:
+        image_path (str): Path to the image file.
+        max_width (int): Maximum width of the resized image.
+        max_height (int): Maximum height of the resized image.
+        maintain_aspect_ratio (bool): Whether to maintain the aspect ratio of the image.
+
+    Returns:
+        PIL.Image: Resized image.
+    """
+
     # Open the image
-    # image = Image.open(image_path)
     image = load_image(image_path)
 
     # Get the current width and height of the image
@@ -73,7 +98,6 @@ def load_and_resize_image(image_path, max_width, max_height, maintain_aspect_rat
 
     return resized_image
 
-from PIL import Image
 
 def align_images(image1, image2):
     """
@@ -97,3 +121,45 @@ def align_images(image1, image2):
     image2 = image2.crop((0, 0, new_width, new_height))
 
     return image1, image2
+
+def align_images_2(image1, image2):
+    """
+    Resize and crop the second image to match the dimensions of the first image by
+    scaling to aspect fill and then center cropping the extra parts.
+
+    Args:
+        image1 (PIL.Image): First image which will act as the reference for alignment.
+        image2 (PIL.Image): Second image to be aligned to the first image's dimensions.
+
+    Returns:
+        tuple: A tuple containing the first image and the aligned second image.
+    """
+    # Get dimensions of the first image
+    target_width, target_height = image1.size
+
+    # Calculate the aspect ratio of the second image
+    aspect_ratio = image2.width / image2.height
+
+    # Calculate dimensions to aspect fill
+    if target_width / target_height > aspect_ratio:
+        # The first image is relatively wider, so match its width and crop the height
+        fill_width = target_width
+        fill_height = int(fill_width / aspect_ratio)
+    else:
+        # The first image is relatively taller, so match its height and crop the width
+        fill_height = target_height
+        fill_width = int(fill_height * aspect_ratio)
+
+    # Resize the second image to fill dimensions
+    filled_image = image2.resize((fill_width, fill_height), Image.Resampling.LANCZOS)
+
+    # Calculate top-left corner of crop box to center crop
+    left = (fill_width - target_width) / 2
+    top = (fill_height - target_height) / 2
+    right = left + target_width
+    bottom = top + target_height
+
+    # Crop the filled image to match the size of the first image
+    cropped_image = filled_image.crop((int(left), int(top), int(right), int(bottom)))
+
+    return image1, cropped_image
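A quick worked example of align_images_2's aspect-fill arithmetic; the image sizes are arbitrary test values, and the snippet assumes utils.py is importable from the working directory.

from PIL import Image
from utils import align_images_2

# Reference is 1024x768; the source is 500x1000 (aspect ratio 0.5).
# Since 1024/768 ~ 1.33 > 0.5, the fill step scales to the reference width:
# fill_width = 1024, fill_height = int(1024 / 0.5) = 2048, giving a 1024x2048
# intermediate that is center-cropped vertically (top = (2048 - 768) / 2 = 640).
reference = Image.new("RGB", (1024, 768))
source = Image.new("RGB", (500, 1000))

_, aligned = align_images_2(reference, source)
assert aligned.size == (1024, 768)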