schirrmacher committed
Commit • ea07c6e • 1 parent: 6b540a3

Upload folder using huggingface_hub

Files changed:
- README.md +4 -6
- create_dataset.sh +27 -21
- example_ground_truth.png +3 -0
- example_image.png +3 -0
- util/merge_images.py +106 -139
- util/test.py +8 -0
README.md CHANGED

```diff
@@ -23,12 +23,10 @@ I created more than 5.000 images with people and more than 5.000 diverse backgro
 
 # Examples
 
-
-
-![](
-![](
-![](./backgrounds/background02.png)
-![](./backgrounds/background03.png)
+Here you can see an augmented image and the resulting ground truth:
+
+![](example_image.png)
+![](example_ground_truth.png)
 
 # Create Training Dataset
 
```
create_dataset.sh CHANGED

```diff
@@ -1,35 +1,41 @@
 #!/bin/bash
 
-
+merge() {
     local backgrounds_dir="backgrounds"
-    local
-
-    local image_path="$1"
-    local groundtruth_path="$2"
-
+    local segmentations_dir="humans"
+
     background=$(find "$backgrounds_dir" -type f | shuf -n 1)
-
-
+    segmentation=$(find "$segmentations_dir" -type f | shuf -n 1)
+
+    echo "Iteration $i: $segmentation + $background"
+
     python3 "util/merge_images.py" \
-        -b "$background" -
-        -
+        -b "$background" -s "$segmentation" \
+        -im "$1" -gt "$2"
 }
 
 main() {
     local max_iterations=2000
+    local train_gt_path="dataset/training/gt"
+    local train_image_path="dataset/training/im"
+    local validation_gt_path="dataset/validation/gt"
+    local validation_image_path="dataset/validation/im"
     for ((i = 0 ; i <= $max_iterations ; i++)); do
         # For quicker creation some parallelization
-        # Notice: last iteration
-
-
-
-
-
-
-
-
-
-
+        # Notice: last iteration is for validation set
+        {
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$train_image_path" "$train_gt_path" &
+            merge "$validation_image_path" "$validation_gt_path" &
+        }
+        wait
     done
 }
 
```
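Each loop iteration now launches nine training merges and one validation merge as background jobs and waits for the batch, so 2000 iterations yield roughly a 9:1 train/validation split. Note that `merge` reads `$i` from `main`'s loop, which works because bash variables are dynamically scoped into the forked jobs. For comparison, here is a minimal Python sketch of the same batching, assuming the `util/merge_images.py` CLI from this commit; the helper function and pool size are illustrative, not part of the repo:

```python
# Sketch only: mirrors create_dataset.sh's 9:1 train/validation batching
# using a process pool instead of shell job control. Pool size is an assumption.
import random
import subprocess
from concurrent.futures import ProcessPoolExecutor
from pathlib import Path


def merge(image_dir: str, gt_dir: str) -> None:
    # Pick a random background and a random segmented human, as the shell script does.
    background = random.choice(list(Path("backgrounds").iterdir()))
    segmentation = random.choice(list(Path("humans").iterdir()))
    subprocess.run(
        ["python3", "util/merge_images.py",
         "-b", str(background), "-s", str(segmentation),
         "-im", image_dir, "-gt", gt_dir],
        check=True,
    )


if __name__ == "__main__":
    with ProcessPoolExecutor(max_workers=10) as pool:
        for _ in range(2000):
            # Nine training samples, then one validation sample per batch,
            # matching the script's last-job-is-validation convention.
            futures = [
                pool.submit(merge, "dataset/training/im", "dataset/training/gt")
                for _ in range(9)
            ]
            futures.append(
                pool.submit(merge, "dataset/validation/im", "dataset/validation/gt")
            )
            for f in futures:
                f.result()
```

A process pool caps concurrency at a fixed number of workers, whereas the shell version simply forks all ten jobs and `wait`s for them.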
example_ground_truth.png ADDED (stored with Git LFS)

example_image.png ADDED (stored with Git LFS)
util/merge_images.py CHANGED

```diff
@@ -6,66 +6,17 @@ import string
 import albumentations as A
 
 
-def
-    transform = A.Compose(
-        [
-            A.HorizontalFlip(p=0.5),
-            A.ShiftScaleRotate(
-                shift_limit_x=(-0.3, 0.3),
-                shift_limit_y=(-0.1, 0.6),
-                scale_limit=(1.0, 1.2),
-                border_mode=cv2.BORDER_CONSTANT,
-                rotate_limit=(-3, 3),
-                p=0.7,
-            ),
-        ]
-    )
-    return transform(image=image)["image"]
-
-
-def augment_overlay(image):
-    has_alpha = image.shape[2] == 4
-    if has_alpha:
-        alpha_channel = image[:, :, 3]
-        color_channels = image[:, :, :3]
-    else:
-        color_channels = image
-
-    # Define the transformation
-    transform = A.Compose(
-        [
-            A.RandomBrightnessContrast(
-                brightness_limit=(-0.1, 0.1), contrast_limit=(-0.4, 0), p=0.8
-            )
-        ]
-    )
-
-    # Apply the transformation only to the color channels
-    transformed = transform(image=color_channels)
-    transformed_image = transformed["image"]
-
-    # Merge the alpha channel back if it was separated
-    if has_alpha:
-        final_image = cv2.merge(
-            (
-                transformed_image[:, :, 0],
-                transformed_image[:, :, 1],
-                transformed_image[:, :, 2],
-                alpha_channel,
-            )
-        )
-    else:
-        final_image = transformed_image
-    return final_image
-
-
-def augment_result(image):
+def augment_final_image(image):
     transform = A.Compose(
         [
             A.MotionBlur(blur_limit=(5, 11), p=1.0),
             A.GaussNoise(var_limit=(10, 150), p=1.0),
-            A.
-
+            A.ColorJitter(
+                brightness=(0.6, 1.0),
+                contrast=(0.6, 1.0),
+                saturation=(0.3, 1),
+                hue=(0.0, 0.1),
+                p=0.5,
             ),
             A.RandomFog(
                 fog_coef_lower=0.05,
```
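`augment_final_image` (formerly `augment_result`) degrades the finished composite so it looks like a real photo: blur and noise always fire (`p=1.0`) while the new color jitter hits about half the samples (`p=0.5`). A minimal sketch of applying such a pipeline, assuming `albumentations` and OpenCV are installed; the input and output paths are illustrative:

```python
# Sketch: applying an albumentations pipeline like augment_final_image.
# The file paths are assumptions for illustration.
import albumentations as A
import cv2

transform = A.Compose(
    [
        A.MotionBlur(blur_limit=(5, 11), p=1.0),   # always applied
        A.GaussNoise(var_limit=(10, 150), p=1.0),  # always applied
        A.ColorJitter(
            brightness=(0.6, 1.0),
            contrast=(0.6, 1.0),
            saturation=(0.3, 1),
            hue=(0.0, 0.1),
            p=0.5,                                 # applied to ~half the samples
        ),
    ]
)

image = cv2.imread("backgrounds/background02.png", cv2.IMREAD_COLOR)
augmented = transform(image=image)["image"]  # same shape and dtype as the input
cv2.imwrite("augmented_example.png", augmented)
```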
```diff
@@ -89,117 +40,133 @@ def augment_result(image):
     return transform(image=image)["image"]
 
 
-def
-
+def remove_alpha_threshold(image, alpha_threshold=160):
+    # This function removes artifacts created by LayerDiffusion
     mask = image[:, :, 3] < alpha_threshold
    image[mask] = [0, 0, 0, 0]
-
     return image
 
 
-def
+def create_ground_truth_mask(image):
+    image = remove_alpha_threshold(image.copy())
+    return image[:, :, 3]
+
+
+def create_random_filename_from_filepath(path):
     letters = string.ascii_lowercase
     random_string = "".join(random.choice(letters) for i in range(13))
-
+    return random_string + "_" + os.path.basename(path)
 
-    background = cv2.imread(background_path, cv2.IMREAD_COLOR)
-    height, width = background.shape[:2]
 
-
-    if overlay.shape[2] < 4:
-        raise Exception("Overlay image does not have an alpha channel.")
-
-    overlay = apply_scale_and_move(overlay)
-
-
-    #
-
-
-    max_height = background.shape[0]
-    max_width = background.shape[1]
-    scale_width = max_width
-    scale_height = int(scale_width / aspect_ratio)
-
-    # Check if the scaled overlay height is too large
-    if scale_height > max_height:
-        scale_height = max_height
-        scale_width = int(scale_height * aspect_ratio)
-
-    overlay_resized = cv2.resize(overlay, (scale_width, scale_height))
-
-    x_pos = (background.shape[1] - scale_width) // 2
-    y_pos = (background.shape[0] - scale_height) // 2
-
-    alpha_mask = overlay_resized[:, :, 3] / 255.0
-    overlay_color = overlay_resized[:, :, :3]
-
-
-    ]
-
-    #
-
-    #
-
-
-
-def expand_image_borders_rgba(
-    image, final_width, final_height, border_color=(0, 0, 0, 0)
-):
-    height, width = image.shape[:2]
-
-
-    left = right = (final_width - width) // 2
-
-
-    if (final_width - width) % 2 != 0:
-        right += 1
-
-
-    )
-
-
-    if image.shape[2] < 4:
-        raise ValueError(
-            "Loaded image does not contain an alpha channel. Make sure the input image is in PNG format with an alpha channel."
-        )
-
-    # Extract the alpha channel
-    image = remove_alpha(image.copy())
-    alpha_channel = image[:, :, 3]
-    # Save or display the alpha channel as a black and white image
-    cv2.imwrite(output_path, alpha_channel)
+def scale_image(image, factor=1.5):
+    width = int(image.shape[1] * factor)
+    height = int(image.shape[0] * factor)
+    return cv2.resize(image, (width, height), interpolation=cv2.INTER_LINEAR)
+
+
+def augment_and_match_size(image, target_width, target_height):
+    random_scale = random.uniform(1, 1.5)
+    image = scale_image(image, random_scale)
+
+    transform = A.Compose(
+        [
+            A.HorizontalFlip(p=0.5),
+            A.ShiftScaleRotate(
+                shift_limit_x=(-0.3, 0.3),
+                shift_limit_y=(0.0, 0.4),
+                scale_limit=(0, 0),
+                border_mode=cv2.BORDER_CONSTANT,
+                rotate_limit=(-5, 5),
+                p=0.7,
+            ),
+        ]
+    )
+    image = transform(image=image)["image"]
+
+    # Ensure the image matches the target dimensions
+    current_height, current_width = image.shape[:2]
+
+    # Crop if the image is larger than the target size
+    if current_height > target_height or current_width > target_width:
+        # Calculating the top-left point to crop the image
+        start_x = max(0, (current_width - target_width) // 2)
+        start_y = max(0, (current_height - target_height) // 2)
+        image = image[
+            start_y : start_y + target_height, start_x : start_x + target_width
+        ]
+
+    # Pad if the image is smaller than the target size
+    if current_height < target_height or current_width < target_width:
+        delta_w = max(0, target_width - current_width)
+        delta_h = max(0, target_height - current_height)
+        top, bottom = delta_h // 2, delta_h - (delta_h // 2)
+        left, right = delta_w // 2, delta_w - (delta_w // 2)
+        color = [0, 0, 0, 0]
+        image = cv2.copyMakeBorder(
+            image, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color
+        )
+
+    return image
+
+
+def merge_images(background, foreground, position=(0, 0)):
+    x, y = position
+
+    fh, fw = foreground.shape[:2]
+
+    if x + fw > background.shape[1]:
+        fw = background.shape[1] - x
+        foreground = foreground[:, :fw]
+    if y + fh > background.shape[0]:
+        fh = background.shape[0] - y
+        foreground = foreground[:fh, :]
+
+    # Region of Interest (ROI) in the background where the foreground will be placed
+    roi = background[y : y + fh, x : x + fw]
+
+    # Split the foreground image into its color and alpha channels
+    foreground_color = foreground[:, :, :3]
+    alpha = foreground[:, :, 3] / 255.0
+
+    # Blend the images based on the alpha channel
+    for c in range(0, 3):
+        roi[:, :, c] = (1.0 - alpha) * roi[:, :, c] + alpha * foreground_color[:, :, c]
+
+    # Place the modified ROI back into the original image
+    background[y : y + fh, x : x + fw] = roi
+
+    return background
+
+
+def create_training_data(
+    background_path, segmentation_path, image_path, ground_truth_path
+):
+    background = cv2.imread(background_path, cv2.IMREAD_COLOR)
+    segmentation = cv2.imread(segmentation_path, cv2.IMREAD_UNCHANGED)
+
+    if segmentation.shape[2] < 4:
+        raise Exception(f"Image does not have an alpha channel: {segmentation_path}")
+
+    file_name = create_random_filename_from_filepath(segmentation_path)
+    image_path = os.path.join(image_path, file_name)
+    ground_truth_path = os.path.join(ground_truth_path, file_name)
+
+    bg_height, bg_width = background.shape[:2]
+    segmentation = augment_and_match_size(
+        segmentation, target_height=bg_height, target_width=bg_width
+    )
+    ground_truth = create_ground_truth_mask(segmentation)
+
+    result = merge_images(background, segmentation)
+    result = augment_final_image(result)
+
+    assert ground_truth.shape[0] == result.shape[0]
+    assert ground_truth.shape[1] == result.shape[1]
+
+    cv2.imwrite(ground_truth_path, ground_truth)
+    cv2.imwrite(image_path, result)
 
 
 def main():
```
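`merge_images` implements standard alpha-over compositing: per color channel, `out = (1 - alpha) * background + alpha * foreground`, with `alpha` taken from the foreground's alpha channel scaled to [0, 1]. A tiny NumPy check of that formula, independent of the rest of the script; the pixel values are made up for illustration:

```python
# Minimal check of the alpha-over blend used in merge_images.
# A half-transparent white pixel over a black one should come out mid-gray.
import numpy as np

bg = np.zeros((1, 1, 3), dtype=np.float64)   # black background pixel
fg_color = np.full((1, 1, 3), 255.0)         # white foreground pixel
alpha = np.full((1, 1), 128.0) / 255.0       # ~50% opacity

out = bg.copy()
for c in range(3):
    out[:, :, c] = (1.0 - alpha) * bg[:, :, c] + alpha * fg_color[:, :, c]

print(out[0, 0])  # [128. 128. 128.]
```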
```diff
@@ -210,7 +177,7 @@ def main():
         "-b", "--background", required=True, help="Path to the background image"
     )
     parser.add_argument(
-        "-
+        "-s", "--segmentation", required=True, help="Path to the segmentation image"
     )
     parser.add_argument(
         "-im",
```

```diff
@@ -233,11 +200,11 @@ def main():
     if not os.path.exists(args.groundtruth_path):
         os.makedirs(args.groundtruth_path)
 
-
-        args.background,
-        args.
-        args.image_path,
-        args.groundtruth_path,
+    create_training_data(
+        background_path=args.background,
+        segmentation_path=args.segmentation,
+        image_path=args.image_path,
+        ground_truth_path=args.groundtruth_path,
     )
 
 
```
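After `create_training_data` runs, each output pair shares one random file name: a BGR composite under `-im` and a single-channel alpha mask under `-gt`, with matching height and width (the script asserts this before writing). A quick way to spot-check a generated pair; the file name below is a hypothetical example of the random-prefix naming:

```python
# Sketch: verify a generated image/ground-truth pair is consistent.
# The file name is an assumption; real pairs share one name across both folders.
import cv2

name = "abcdefghijklm_example01.png"  # hypothetical output file name
image = cv2.imread(f"dataset/training/im/{name}", cv2.IMREAD_COLOR)
mask = cv2.imread(f"dataset/training/gt/{name}", cv2.IMREAD_GRAYSCALE)

assert image is not None and mask is not None
assert image.shape[:2] == mask.shape  # same height and width
assert mask.ndim == 2                 # single-channel alpha mask
print(image.shape, mask.shape)
```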
util/test.py ADDED

```diff
@@ -0,0 +1,8 @@
+import cv2
+from merge_images import augment_and_match_size
+
+
+if __name__ == "__main__":
+    image = cv2.imread("humans/example01.png", cv2.IMREAD_UNCHANGED)
+    result = augment_and_match_size(image, 600, 1000)
+    cv2.imwrite("dataset/test.png", result)
```
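This smoke test drives `augment_and_match_size` directly with a 600x1000 target. A natural extension, sketched under the assumption that the same example input exists and has an alpha channel, is to also write the derived ground-truth mask so both outputs can be inspected side by side:

```python
# Sketch: extend util/test.py to also inspect the ground-truth mask.
# Assumes humans/example01.png exists and has an alpha channel.
import cv2
from merge_images import augment_and_match_size, create_ground_truth_mask

if __name__ == "__main__":
    image = cv2.imread("humans/example01.png", cv2.IMREAD_UNCHANGED)
    augmented = augment_and_match_size(image, 600, 1000)
    mask = create_ground_truth_mask(augmented)  # alpha channel after thresholding
    cv2.imwrite("dataset/test.png", augmented)
    cv2.imwrite("dataset/test_gt.png", mask)
```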