Yossh committed on
Commit
1433dbd
1 Parent(s): 1fcec89

Upload 4 files

conditioning_images.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0771a9ef980ac5e1ab05a635cdcb601194031d10ac74dec815df9667668c544b
+ size 8441614671
depth_overlap_simple_shape.py ADDED
@@ -0,0 +1,101 @@
+ import pandas as pd
+ from huggingface_hub import hf_hub_url
+ import datasets
+ import os
+
+ _VERSION = datasets.Version("0.0.1")
+
+ _DESCRIPTION = "TODO"
+ _HOMEPAGE = "TODO"
+ _LICENSE = "TODO"
+ _CITATION = "TODO"
+
+ _FEATURES = datasets.Features(
+     {
+         "image": datasets.Image(),
+         "conditioning_image": datasets.Image(),
+         "text": datasets.Value("string"),
+     },
+ )
+
+ METADATA_URL = hf_hub_url(
+     "Yossh/depth_overlap_simple_shape",
+     filename="train.jsonl",
+     repo_type="dataset",
+ )
+
+ IMAGES_URL = hf_hub_url(
+     "Yossh/depth_overlap_simple_shape",
+     filename="images.zip",
+     repo_type="dataset",
+ )
+
+ CONDITIONING_IMAGES_URL = hf_hub_url(
+     "Yossh/depth_overlap_simple_shape",
+     filename="conditioning_images.zip",
+     repo_type="dataset",
+ )
+
+ _DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION)
+
+
+ class DepthOverlapSimpleShape(datasets.GeneratorBasedBuilder):
+     BUILDER_CONFIGS = [_DEFAULT_CONFIG]
+     DEFAULT_CONFIG_NAME = "default"
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=_FEATURES,
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         metadata_path = dl_manager.download(METADATA_URL)
+         images_dir = dl_manager.download_and_extract(IMAGES_URL)
+         conditioning_images_dir = dl_manager.download_and_extract(
+             CONDITIONING_IMAGES_URL
+         )
+
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "metadata_path": metadata_path,
+                     "images_dir": images_dir,
+                     "conditioning_images_dir": conditioning_images_dir,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir):
+         metadata = pd.read_json(metadata_path, lines=True)
+
+         for _, row in metadata.iterrows():
+             text = row["text"]
+
+             image_path = os.path.join(images_dir, row["image"])
+             with open(image_path, "rb") as f:
+                 image = f.read()
+
+             conditioning_image_path = os.path.join(
+                 conditioning_images_dir, row["conditioning_image"]
+             )
+             with open(conditioning_image_path, "rb") as f:
+                 conditioning_image = f.read()
+
+             yield row["image"], {
+                 "text": text,
+                 "image": {
+                     "path": image_path,
+                     "bytes": image,
+                 },
+                 "conditioning_image": {
+                     "path": conditioning_image_path,
+                     "bytes": conditioning_image,
+                 },
+             }
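For context, a loading script like this is normally consumed through datasets.load_dataset with the repo id hard-coded above. A minimal usage sketch, assuming the datasets library is installed (recent versions may additionally require trust_remote_code=True for script-based datasets); it is not part of the committed files:

# Minimal usage sketch (assumption: run outside the repo, with network access).
from datasets import load_dataset

ds = load_dataset("Yossh/depth_overlap_simple_shape", split="train")
example = ds[0]
print(example["text"])             # caption string from train.jsonl
example["image"]                   # decoded PIL.Image from images.zip
example["conditioning_image"]      # decoded PIL.Image from conditioning_images.zip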
figure_generate.py ADDED
@@ -0,0 +1,160 @@
+ from PIL import Image, ImageFilter
+ import numpy as np
+ import random
+ import math
+
+
+ class SimplePolygon:
+     pow2 = None  # lookup table of squares, shared across instances
+
+     def __init__(self, file_name, depth_name, tags_name, resolution=(512, 512), init_color=(255, 255, 255)):
+         self.file_name = file_name    # RGB shape render
+         self.depth_name = depth_name  # grayscale depth map
+         self.tags_name = tags_name    # caption/tag text file
+         self.width = resolution[0]
+         self.height = resolution[1]
+         # Component 0 is the background covering the whole canvas.
+         self.components = [set((wi, hi) for wi in range(self.width) for hi in range(self.height))]
+         self.img1 = np.full((self.width, self.height, 3), np.random.randint(0, 256, size=(3,), dtype=np.uint8), dtype=np.uint8)
+         self.img2 = np.zeros([self.width, self.height, 3], dtype=np.uint8)
+         self.count = 0
+         self.layer_max = None
+         if self.pow2 is None:
+             self.pow2 = [x**2 for x in range(max(resolution[0], resolution[1]) + 1)]
+
+     def component_check(self, comp):
+         # Keep only coordinates that fall inside the canvas.
+         retv = set()
+         for wi, hi in comp:
+             if 0 <= wi < self.width and 0 <= hi < self.height:
+                 retv.add((wi, hi))
+         return retv
+
+     def components_check(self, components):
+         # Drop shapes that are completely contained in a later shape.
+         retv = []
+         for i in range(len(components)):
+             is_contain = False
+             for j in range(i + 1, len(components)):
+                 if components[i] <= components[j]:
+                     is_contain = True
+                     break
+             if not is_contain:
+                 retv.append(components[i])
+         return retv
+
+     def generate(self):
+         # Remove shapes fully contained in another shape.
+         self.components = self.components_check(self.components)
+         # Remove coordinates outside the image.
+         for i in range(len(self.components)):
+             self.components[i] = self.component_check(self.components[i])
+
+         layer = np.zeros(len(self.components), dtype=np.uint16)
+         noise = np.zeros(3, dtype=np.int16)
+         if random.randint(1, 5) == 1:
+             noise = np.random.randint(-10, 10, size=(3,), dtype=np.int16)
+         for i in range(len(self.components)):
+             # Layer = max over earlier overlapping shapes of (their layer + 1).
+             for j in range(i - 1, -1, -1):
+                 if not self.components[i].isdisjoint(self.components[j]):
+                     layer[i] = max(layer[i], layer[j] + 1)
+             # Color the shape with a random linear gradient.
+             color = np.random.randint(0, 255, size=(3,), dtype=np.int16)
+             vec = np.random.randint(0, 255, size=(3,), dtype=np.int16)
+             ep = np.random.normal(0, 2, size=(2,))
+             for wi, hi in self.components[i]:
+                 self.img1[wi][hi] = np.abs((color + vec * (ep[0] * wi / self.width + ep[1] * hi / self.height) + noise) % 512 - 256).astype(np.uint8)
+         # Generate the depth image: brighter = higher layer.
+         self.layer_max = np.max(layer)
+         normalized_layer = ((255 * layer) // max(int(self.layer_max), 1)).astype(np.uint8)
+         for wi in range(self.width):
+             for hi in range(self.height):
+                 for i in range(len(self.components) - 1, -1, -1):
+                     if (wi, hi) in self.components[i]:
+                         self.img2[wi][hi][0] = normalized_layer[i]
+                         self.img2[wi][hi][1] = normalized_layer[i]
+                         self.img2[wi][hi][2] = normalized_layer[i]
+                         break
+
+     def add_rect(self):
+         left_top = [random.randint(0, self.width // 10 * 9), random.randint(0, self.height // 10 * 9)]
+         poly_width = random.randint(60, self.width // 2)
+         poly_height = random.randint(60, self.height // 2)
+         poly = set()
+         for wi in range(left_top[0], min(left_top[0] + poly_width, self.width)):
+             for hi in range(left_top[1], min(left_top[1] + poly_height, self.height)):
+                 poly.add((wi, hi))
+         self.components.append(self.rotate_component(poly, random.uniform(-math.pi, math.pi)))
+         return poly
+
+     def add_ellipse(self):
+         center = [random.randint(self.width // 10, self.width // 10 * 9), random.randint(self.height // 10, self.height // 10 * 9)]
+         radius_width = random.randint(60, self.width // 4)
+         radius_height = random.randint(60, self.height // 4)
+         poly = set()
+         r = radius_width**2 * radius_height**2
+         h2 = radius_height**2
+         w2 = radius_width**2
+         for wi in range(self.width):
+             xx = h2 * self.pow2[abs(wi - center[0])]
+             if xx > r:
+                 continue
+             for hi in range(self.height):
+                 if xx + w2 * self.pow2[abs(hi - center[1])] <= r:
+                     poly.add((wi, hi))
+         self.components.append(self.rotate_component(poly, random.uniform(-math.pi, math.pi)))
+         return poly
+
+     def rotate_component(self, comp, rad):
+         # Rotate a pixel set around the image center; the 3x3 neighborhood
+         # fills the gaps that rounding would otherwise leave.
+         retv = set()
+         for wi, hi in comp:
+             new_wi = math.cos(rad) * (wi - self.width / 2) - math.sin(rad) * (hi - self.height / 2) + self.width / 2
+             new_hi = math.sin(rad) * (wi - self.width / 2) + math.cos(rad) * (hi - self.height / 2) + self.height / 2
+             new_wi = round(new_wi)
+             new_hi = round(new_hi)
+             for i in range(-1, 2):  # -1, 0, 1
+                 for j in range(-1, 2):
+                     retv.add((new_wi + i, new_hi + j))
+         return retv
+
+     def save(self):
+         Image.fromarray(self.img1).filter(filter=ImageFilter.GaussianBlur(random.randint(0, 1))).save(self.file_name)
+         Image.fromarray(self.img2).save(self.depth_name)
+         with open(self.tags_name, "w") as file:
+             if self.layer_max is None:
+                 file.write("")
+             else:
+                 # Caption tag, e.g. "5depth" when five depth layers are present.
+                 file.write(f"{self.layer_max+1}depth")
+
+
+ import uuid
+ import concurrent.futures
+ from tqdm import tqdm
+ import os
+
+
+ def process_polygon():
+     random_uuid = str(uuid.uuid4())
+     # The RGB render goes to conditioning_images/, the depth map to images/.
+     polygon = SimplePolygon(f"conditioning_images/{random_uuid}.png", f"images/{random_uuid}.png", f"conditioning_images/{random_uuid}.txt")
+     for _ in range(random.randint(4, 20)):
+         if random.randint(0, 1) == 0:
+             polygon.add_ellipse()
+         else:
+             polygon.add_rect()
+     polygon.generate()
+     polygon.save()
+
+
+ def main():
+     os.makedirs("conditioning_images", exist_ok=True)
+     os.makedirs("images", exist_ok=True)
+     num_processes = 24
+     total_runs = 21037
+
+     with tqdm(total=total_runs, ncols=80) as pbar:
+         with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
+             futures = [executor.submit(process_polygon) for _ in range(total_runs)]
+             for future in concurrent.futures.as_completed(futures):
+                 try:
+                     future.result()
+                 except Exception as e:
+                     print(f"Error occurred: {e}")
+                 finally:
+                     pbar.update(1)
+
+
+ if __name__ == "__main__":
+     main()
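Before launching the full 21037-image ProcessPoolExecutor run, the generator can be smoke-tested in a single process. A minimal sketch, assuming it is run from the repository root so figure_generate.py imports cleanly; it is not part of the committed files, and the sample_* output names are placeholders:

# Single-process smoke test (assumption: figure_generate.py is on the path).
import os
import random

from figure_generate import SimplePolygon

os.makedirs("conditioning_images", exist_ok=True)
os.makedirs("images", exist_ok=True)

for i in range(3):
    poly = SimplePolygon(
        f"conditioning_images/sample_{i}.png",  # RGB shape render
        f"images/sample_{i}.png",               # grayscale depth map
        f"conditioning_images/sample_{i}.txt",  # caption tag, e.g. "5depth"
    )
    for _ in range(random.randint(4, 20)):
        if random.randint(0, 1) == 0:
            poly.add_ellipse()
        else:
            poly.add_rect()
    poly.generate()
    poly.save()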
images.zip CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:cf40915ae282673dbdae68b9251bd776bdecaa928c08612ee5e3a0a64c64ecca
- size 489238929
+ oid sha256:0777317872179bca4f9ed73aa4d034fe01ee111943a7c44cfd934102985a911a
+ size 613377917